2024-12-06 08:11:26,424 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc 2024-12-06 08:11:26,440 main DEBUG Took 0.013587 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-06 08:11:26,440 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-06 08:11:26,441 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-06 08:11:26,442 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-06 08:11:26,443 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:11:26,451 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-06 08:11:26,466 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,467 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:11:26,468 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,468 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:11:26,469 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,469 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:11:26,471 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,471 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:11:26,472 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,472 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:11:26,473 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,473 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:11:26,474 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,474 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-06 08:11:26,475 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,475 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:11:26,476 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,476 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:11:26,477 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,477 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:11:26,478 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,478 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:11:26,479 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,479 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-06 08:11:26,480 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,480 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-06 08:11:26,482 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-06 08:11:26,484 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-06 08:11:26,486 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-06 08:11:26,487 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-06 08:11:26,488 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-06 08:11:26,489 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-06 08:11:26,498 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-06 08:11:26,501 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-06 08:11:26,503 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-06 08:11:26,503 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-06 08:11:26,504 main DEBUG createAppenders(={Console}) 2024-12-06 08:11:26,504 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc initialized 2024-12-06 08:11:26,505 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc 2024-12-06 08:11:26,505 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@55de24cc OK. 2024-12-06 08:11:26,506 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-06 08:11:26,506 main DEBUG OutputStream closed 2024-12-06 08:11:26,507 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-06 08:11:26,507 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-06 08:11:26,507 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@53ce1329 OK 2024-12-06 08:11:26,590 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-06 08:11:26,592 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-06 08:11:26,594 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-06 08:11:26,595 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-06 08:11:26,596 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-06 08:11:26,596 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-06 08:11:26,596 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-06 08:11:26,597 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-06 08:11:26,597 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-06 08:11:26,598 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-06 08:11:26,598 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-06 08:11:26,599 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-06 08:11:26,599 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-06 08:11:26,599 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-06 08:11:26,600 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-06 08:11:26,600 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-06 08:11:26,600 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-06 08:11:26,601 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-06 08:11:26,603 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-06 08:11:26,604 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-06 08:11:26,604 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-06 08:11:26,605 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-06T08:11:26,881 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0 2024-12-06 08:11:26,884 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-06 08:11:26,885 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-06T08:11:26,894 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-06T08:11:26,928 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=262, ProcessCount=11, AvailableMemoryMB=9032 2024-12-06T08:11:26,931 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T08:11:26,934 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/cluster_4ec09d49-f84c-fb79-2013-08b901b9695b, deleteOnExit=true 2024-12-06T08:11:26,935 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T08:11:26,936 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/test.cache.data in system properties and HBase conf 2024-12-06T08:11:26,936 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T08:11:26,937 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/hadoop.log.dir in system properties and HBase conf 2024-12-06T08:11:26,938 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T08:11:26,939 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T08:11:26,939 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T08:11:27,049 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-06T08:11:27,187 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T08:11:27,192 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:11:27,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:11:27,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T08:11:27,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:11:27,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T08:11:27,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T08:11:27,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:11:27,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:11:27,197 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T08:11:27,197 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/nfs.dump.dir in system properties and HBase conf 2024-12-06T08:11:27,198 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/java.io.tmpdir in system properties and HBase conf 2024-12-06T08:11:27,198 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:11:27,199 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T08:11:27,199 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T08:11:27,738 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T08:11:28,108 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-06T08:11:28,187 INFO [Time-limited test {}] log.Log(170): Logging initialized @2568ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-06T08:11:28,259 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:11:28,325 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:11:28,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:11:28,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:11:28,347 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:11:28,360 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:11:28,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@88aab13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:11:28,364 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74468826{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:11:28,579 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5682c4d1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/java.io.tmpdir/jetty-localhost-45225-hadoop-hdfs-3_4_1-tests_jar-_-any-6965408650513594826/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:11:28,586 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ff1a6c1{HTTP/1.1, (http/1.1)}{localhost:45225} 2024-12-06T08:11:28,587 INFO [Time-limited test {}] server.Server(415): Started @2969ms 2024-12-06T08:11:28,621 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T08:11:28,990 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:11:28,997 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:11:29,000 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:11:29,000 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:11:29,001 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:11:29,001 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2276bd44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:11:29,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b4ce9e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:11:29,127 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6aad8790{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/java.io.tmpdir/jetty-localhost-43835-hadoop-hdfs-3_4_1-tests_jar-_-any-3768464257646904783/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:11:29,128 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@587d1dca{HTTP/1.1, (http/1.1)}{localhost:43835} 2024-12-06T08:11:29,129 INFO [Time-limited test {}] server.Server(415): Started @3511ms 2024-12-06T08:11:29,185 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:11:29,316 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:11:29,321 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:11:29,323 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:11:29,323 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:11:29,323 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:11:29,328 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4debea22{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:11:29,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6eb1b261{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:11:29,464 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@163cfad6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/java.io.tmpdir/jetty-localhost-33519-hadoop-hdfs-3_4_1-tests_jar-_-any-15461055344834332835/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:11:29,465 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f952caa{HTTP/1.1, (http/1.1)}{localhost:33519} 2024-12-06T08:11:29,465 INFO [Time-limited test {}] server.Server(415): Started @3848ms 2024-12-06T08:11:29,468 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-06T08:11:29,669 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/cluster_4ec09d49-f84c-fb79-2013-08b901b9695b/dfs/data/data3/current/BP-2060022616-172.17.0.2-1733472687841/current, will proceed with Du for space computation calculation, 2024-12-06T08:11:29,669 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/cluster_4ec09d49-f84c-fb79-2013-08b901b9695b/dfs/data/data1/current/BP-2060022616-172.17.0.2-1733472687841/current, will proceed with Du for space computation calculation, 2024-12-06T08:11:29,669 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/cluster_4ec09d49-f84c-fb79-2013-08b901b9695b/dfs/data/data2/current/BP-2060022616-172.17.0.2-1733472687841/current, will proceed with Du for space computation calculation, 2024-12-06T08:11:29,671 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/cluster_4ec09d49-f84c-fb79-2013-08b901b9695b/dfs/data/data4/current/BP-2060022616-172.17.0.2-1733472687841/current, will proceed with Du for space computation calculation, 2024-12-06T08:11:29,743 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:11:29,744 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T08:11:29,837 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x82c213333a69cb32 with lease ID 0x8a3800dc69be2ed6: Processing first storage report for DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779 from datanode DatanodeRegistration(127.0.0.1:43367, datanodeUuid=8d79b965-5f09-408d-875f-9be18fd8760a, infoPort=35337, infoSecurePort=0, ipcPort=44839, storageInfo=lv=-57;cid=testClusterID;nsid=125952323;c=1733472687841) 2024-12-06T08:11:29,838 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x82c213333a69cb32 with lease ID 0x8a3800dc69be2ed6: from storage DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779 node DatanodeRegistration(127.0.0.1:43367, datanodeUuid=8d79b965-5f09-408d-875f-9be18fd8760a, infoPort=35337, infoSecurePort=0, ipcPort=44839, storageInfo=lv=-57;cid=testClusterID;nsid=125952323;c=1733472687841), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-06T08:11:29,839 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x24af8146da720be4 with lease ID 0x8a3800dc69be2ed7: Processing first storage report for DS-ef306e84-a195-4a3f-96d1-d8ae12675401 from datanode DatanodeRegistration(127.0.0.1:42039, datanodeUuid=08f5db71-ca13-4e86-aca8-d2947505f0b7, infoPort=38183, infoSecurePort=0, ipcPort=41263, storageInfo=lv=-57;cid=testClusterID;nsid=125952323;c=1733472687841) 2024-12-06T08:11:29,839 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x24af8146da720be4 with lease ID 0x8a3800dc69be2ed7: from storage DS-ef306e84-a195-4a3f-96d1-d8ae12675401 node DatanodeRegistration(127.0.0.1:42039, datanodeUuid=08f5db71-ca13-4e86-aca8-d2947505f0b7, infoPort=38183, infoSecurePort=0, ipcPort=41263, storageInfo=lv=-57;cid=testClusterID;nsid=125952323;c=1733472687841), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:11:29,839 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x82c213333a69cb32 with lease ID 0x8a3800dc69be2ed6: Processing first storage report for DS-c9b61a30-0700-4a71-8060-fb30fbffd2de from datanode DatanodeRegistration(127.0.0.1:43367, datanodeUuid=8d79b965-5f09-408d-875f-9be18fd8760a, infoPort=35337, infoSecurePort=0, ipcPort=44839, storageInfo=lv=-57;cid=testClusterID;nsid=125952323;c=1733472687841) 2024-12-06T08:11:29,840 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x82c213333a69cb32 with lease ID 0x8a3800dc69be2ed6: from storage DS-c9b61a30-0700-4a71-8060-fb30fbffd2de node DatanodeRegistration(127.0.0.1:43367, datanodeUuid=8d79b965-5f09-408d-875f-9be18fd8760a, infoPort=35337, infoSecurePort=0, ipcPort=44839, storageInfo=lv=-57;cid=testClusterID;nsid=125952323;c=1733472687841), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:11:29,840 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x24af8146da720be4 with lease ID 0x8a3800dc69be2ed7: Processing first storage report for DS-dd24733a-9f7c-4678-b572-eb3184e48bb5 from datanode DatanodeRegistration(127.0.0.1:42039, datanodeUuid=08f5db71-ca13-4e86-aca8-d2947505f0b7, infoPort=38183, infoSecurePort=0, ipcPort=41263, storageInfo=lv=-57;cid=testClusterID;nsid=125952323;c=1733472687841) 2024-12-06T08:11:29,840 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x24af8146da720be4 with lease ID 0x8a3800dc69be2ed7: from storage DS-dd24733a-9f7c-4678-b572-eb3184e48bb5 node DatanodeRegistration(127.0.0.1:42039, datanodeUuid=08f5db71-ca13-4e86-aca8-d2947505f0b7, infoPort=38183, infoSecurePort=0, ipcPort=41263, storageInfo=lv=-57;cid=testClusterID;nsid=125952323;c=1733472687841), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:11:29,890 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0 2024-12-06T08:11:29,970 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/cluster_4ec09d49-f84c-fb79-2013-08b901b9695b/zookeeper_0, clientPort=58605, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/cluster_4ec09d49-f84c-fb79-2013-08b901b9695b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/cluster_4ec09d49-f84c-fb79-2013-08b901b9695b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T08:11:29,980 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=58605 2024-12-06T08:11:29,993 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:11:29,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:11:30,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:11:30,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:11:30,661 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24 with version=8 2024-12-06T08:11:30,661 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/hbase-staging 2024-12-06T08:11:30,788 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-06T08:11:31,059 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:11:31,079 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:11:31,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:11:31,080 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:11:31,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:11:31,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:11:31,214 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:11:31,278 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-06T08:11:31,287 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-06T08:11:31,291 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:11:31,321 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 28936 (auto-detected) 2024-12-06T08:11:31,322 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-06T08:11:31,341 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:37937 2024-12-06T08:11:31,349 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:11:31,352 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:11:31,365 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:37937 connecting to ZooKeeper ensemble=127.0.0.1:58605 2024-12-06T08:11:31,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379370x0, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:11:31,401 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37937-0x1006661d8030000 connected 2024-12-06T08:11:31,429 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:11:31,432 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:11:31,435 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:11:31,439 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37937 2024-12-06T08:11:31,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37937 2024-12-06T08:11:31,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37937 2024-12-06T08:11:31,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37937 2024-12-06T08:11:31,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37937 2024-12-06T08:11:31,447 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24, hbase.cluster.distributed=false 2024-12-06T08:11:31,508 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:11:31,508 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:11:31,509 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:11:31,509 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:11:31,509 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:11:31,509 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:11:31,511 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:11:31,514 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:11:31,515 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38061 2024-12-06T08:11:31,516 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:11:31,522 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:11:31,523 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:11:31,527 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:11:31,532 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:38061 connecting to ZooKeeper ensemble=127.0.0.1:58605 2024-12-06T08:11:31,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:380610x0, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:11:31,537 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:11:31,537 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38061-0x1006661d8030001 connected 2024-12-06T08:11:31,539 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:11:31,540 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:11:31,541 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38061 2024-12-06T08:11:31,541 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38061 2024-12-06T08:11:31,541 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38061 2024-12-06T08:11:31,544 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38061 2024-12-06T08:11:31,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38061 2024-12-06T08:11:31,548 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b6b797fc3981,37937,1733472690782 2024-12-06T08:11:31,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:11:31,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:11:31,559 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b6b797fc3981,37937,1733472690782 2024-12-06T08:11:31,564 DEBUG [M:0;b6b797fc3981:37937 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b6b797fc3981:37937 2024-12-06T08:11:31,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:11:31,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/master 2024-12-06T08:11:31,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:31,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:31,583 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:11:31,583 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:11:31,584 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b6b797fc3981,37937,1733472690782 from backup master directory 2024-12-06T08:11:31,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b6b797fc3981,37937,1733472690782 2024-12-06T08:11:31,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:11:31,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:11:31,588 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T08:11:31,588 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b6b797fc3981,37937,1733472690782 2024-12-06T08:11:31,591 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-06T08:11:31,592 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-06T08:11:31,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:11:31,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:11:31,685 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/hbase.id with ID: 780d1529-8ef7-4e09-a0b6-ac19cfa9c8e5 2024-12-06T08:11:31,730 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:11:31,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:31,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:31,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:11:31,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:11:31,794 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:11:31,796 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T08:11:31,802 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:11:31,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:11:31,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:11:31,855 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store 2024-12-06T08:11:31,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:11:31,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:11:31,877 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-06T08:11:31,878 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:11:31,879 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:11:31,879 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:11:31,879 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:11:31,879 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:11:31,879 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:11:31,879 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:11:31,879 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:11:31,881 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/.initializing 2024-12-06T08:11:31,881 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/WALs/b6b797fc3981,37937,1733472690782 2024-12-06T08:11:31,897 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C37937%2C1733472690782, suffix=, logDir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/WALs/b6b797fc3981,37937,1733472690782, archiveDir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/oldWALs, maxLogs=10 2024-12-06T08:11:31,906 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C37937%2C1733472690782.1733472691903 2024-12-06T08:11:31,906 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(752): Using builder API via reflection for DFS file creation replicate flag. 2024-12-06T08:11:31,907 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(762): Using builder API via reflection for DFS file creation noLocalWrite flag. 
2024-12-06T08:11:31,925 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/WALs/b6b797fc3981,37937,1733472690782/b6b797fc3981%2C37937%2C1733472690782.1733472691903 2024-12-06T08:11:31,934 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337),(127.0.0.1/127.0.0.1:38183:38183)] 2024-12-06T08:11:31,934 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:11:31,935 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:11:31,938 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:11:31,939 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:11:31,977 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:11:32,008 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T08:11:32,013 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:32,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:11:32,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:11:32,022 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T08:11:32,022 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:32,023 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:11:32,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:11:32,027 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T08:11:32,027 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:32,028 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:11:32,029 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:11:32,032 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T08:11:32,032 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:32,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:11:32,037 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:11:32,038 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:11:32,046 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-06T08:11:32,050 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:11:32,054 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:11:32,055 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792513, jitterRate=0.007733166217803955}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:11:32,059 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:11:32,060 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T08:11:32,091 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bb7585d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:11:32,128 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-06T08:11:32,141 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T08:11:32,142 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T08:11:32,144 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T08:11:32,146 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-06T08:11:32,152 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-12-06T08:11:32,152 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T08:11:32,178 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-06T08:11:32,193 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T08:11:32,197 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T08:11:32,200 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T08:11:32,201 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T08:11:32,206 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T08:11:32,208 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T08:11:32,212 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T08:11:32,214 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T08:11:32,216 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T08:11:32,218 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T08:11:32,228 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T08:11:32,230 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T08:11:32,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:11:32,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:11:32,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:32,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-06T08:11:32,235 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b6b797fc3981,37937,1733472690782, sessionid=0x1006661d8030000, setting cluster-up flag (Was=false) 2024-12-06T08:11:32,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:32,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:32,258 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T08:11:32,260 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,37937,1733472690782 2024-12-06T08:11:32,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:32,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:32,272 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T08:11:32,273 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,37937,1733472690782 2024-12-06T08:11:32,364 DEBUG [RS:0;b6b797fc3981:38061 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b6b797fc3981:38061 2024-12-06T08:11:32,364 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T08:11:32,366 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1008): ClusterId : 780d1529-8ef7-4e09-a0b6-ac19cfa9c8e5 2024-12-06T08:11:32,370 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:11:32,372 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T08:11:32,375 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-06T08:11:32,376 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:11:32,377 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:11:32,380 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:11:32,381 DEBUG [RS:0;b6b797fc3981:38061 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58404ac4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:11:32,383 DEBUG [RS:0;b6b797fc3981:38061 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6630c1ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:11:32,382 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b6b797fc3981,37937,1733472690782 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T08:11:32,386 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:11:32,386 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:11:32,387 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:11:32,387 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:11:32,387 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T08:11:32,387 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b6b797fc3981:0, corePoolSize=10, maxPoolSize=10 2024-12-06T08:11:32,387 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T08:11:32,387 DEBUG [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-06T08:11:32,387 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:11:32,388 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:11:32,388 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:11:32,390 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733472722390 2024-12-06T08:11:32,390 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(3073): reportForDuty to master=b6b797fc3981,37937,1733472690782 with isa=b6b797fc3981/172.17.0.2:38061, startcode=1733472691507 2024-12-06T08:11:32,392 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T08:11:32,393 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T08:11:32,394 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:11:32,394 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T08:11:32,397 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T08:11:32,397 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T08:11:32,398 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T08:11:32,398 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T08:11:32,398 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:32,399 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:11:32,399 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:32,402 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T08:11:32,404 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T08:11:32,404 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T08:11:32,405 DEBUG [RS:0;b6b797fc3981:38061 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:11:32,406 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T08:11:32,407 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T08:11:32,412 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472692408,5,FailOnTimeoutGroup] 2024-12-06T08:11:32,412 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472692412,5,FailOnTimeoutGroup] 2024-12-06T08:11:32,412 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:32,413 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T08:11:32,414 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:32,414 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-06T08:11:32,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:11:32,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:11:32,420 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T08:11:32,421 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24 2024-12-06T08:11:32,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:11:32,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:11:32,434 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:11:32,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:11:32,440 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:11:32,441 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:32,442 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:11:32,442 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:11:32,445 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:11:32,445 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:32,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:11:32,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:11:32,449 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:11:32,450 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:32,451 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:11:32,452 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740 2024-12-06T08:11:32,453 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740 2024-12-06T08:11:32,457 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:11:32,460 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:11:32,465 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:11:32,466 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774972, jitterRate=-0.014572694897651672}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:11:32,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:11:32,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:11:32,470 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:11:32,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:11:32,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:11:32,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:11:32,471 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T08:11:32,471 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:11:32,474 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:11:32,475 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T08:11:32,478 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53611, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:11:32,483 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37937 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b6b797fc3981,38061,1733472691507 2024-12-06T08:11:32,484 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 
2024-12-06T08:11:32,486 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37937 {}] master.ServerManager(486): Registering regionserver=b6b797fc3981,38061,1733472691507 2024-12-06T08:11:32,495 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T08:11:32,497 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T08:11:32,501 DEBUG [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24 2024-12-06T08:11:32,501 DEBUG [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:39643 2024-12-06T08:11:32,501 DEBUG [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T08:11:32,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:11:32,507 DEBUG [RS:0;b6b797fc3981:38061 {}] zookeeper.ZKUtil(111): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b6b797fc3981,38061,1733472691507 2024-12-06T08:11:32,507 WARN [RS:0;b6b797fc3981:38061 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:11:32,507 INFO [RS:0;b6b797fc3981:38061 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:11:32,507 DEBUG [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507 2024-12-06T08:11:32,509 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b6b797fc3981,38061,1733472691507] 2024-12-06T08:11:32,524 DEBUG [RS:0;b6b797fc3981:38061 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T08:11:32,536 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:11:32,547 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:11:32,551 INFO [RS:0;b6b797fc3981:38061 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:11:32,551 INFO [RS:0;b6b797fc3981:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-06T08:11:32,552 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T08:11:32,558 INFO [RS:0;b6b797fc3981:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:32,559 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:11:32,559 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:11:32,559 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:11:32,559 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:11:32,559 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:11:32,559 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:11:32,559 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:11:32,560 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:11:32,560 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:11:32,560 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:11:32,560 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:11:32,560 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:11:32,560 DEBUG [RS:0;b6b797fc3981:38061 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:11:32,561 INFO [RS:0;b6b797fc3981:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:32,561 INFO [RS:0;b6b797fc3981:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:32,561 INFO [RS:0;b6b797fc3981:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-06T08:11:32,561 INFO [RS:0;b6b797fc3981:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:32,562 INFO [RS:0;b6b797fc3981:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,38061,1733472691507-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:11:32,590 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:11:32,593 INFO [RS:0;b6b797fc3981:38061 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,38061,1733472691507-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:32,612 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.Replication(204): b6b797fc3981,38061,1733472691507 started 2024-12-06T08:11:32,612 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1767): Serving as b6b797fc3981,38061,1733472691507, RpcServer on b6b797fc3981/172.17.0.2:38061, sessionid=0x1006661d8030001 2024-12-06T08:11:32,613 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:11:32,613 DEBUG [RS:0;b6b797fc3981:38061 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b6b797fc3981,38061,1733472691507 2024-12-06T08:11:32,613 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,38061,1733472691507' 2024-12-06T08:11:32,613 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:11:32,614 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:11:32,615 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:11:32,615 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:11:32,615 DEBUG [RS:0;b6b797fc3981:38061 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b6b797fc3981,38061,1733472691507 2024-12-06T08:11:32,615 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,38061,1733472691507' 2024-12-06T08:11:32,615 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:11:32,616 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:11:32,616 DEBUG [RS:0;b6b797fc3981:38061 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:11:32,617 INFO [RS:0;b6b797fc3981:38061 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:11:32,617 INFO [RS:0;b6b797fc3981:38061 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T08:11:32,647 WARN [b6b797fc3981:37937 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-06T08:11:32,726 INFO [RS:0;b6b797fc3981:38061 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C38061%2C1733472691507, suffix=, logDir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507, archiveDir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/oldWALs, maxLogs=32 2024-12-06T08:11:32,729 INFO [RS:0;b6b797fc3981:38061 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C38061%2C1733472691507.1733472692729 2024-12-06T08:11:32,738 INFO [RS:0;b6b797fc3981:38061 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472692729 2024-12-06T08:11:32,738 DEBUG [RS:0;b6b797fc3981:38061 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38183:38183),(127.0.0.1/127.0.0.1:35337:35337)] 2024-12-06T08:11:32,899 DEBUG [b6b797fc3981:37937 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T08:11:32,904 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b6b797fc3981,38061,1733472691507 2024-12-06T08:11:32,908 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,38061,1733472691507, state=OPENING 2024-12-06T08:11:32,914 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T08:11:32,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:32,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:32,917 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:11:32,917 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:11:32,919 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b6b797fc3981,38061,1733472691507}] 2024-12-06T08:11:33,095 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38061,1733472691507 2024-12-06T08:11:33,096 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T08:11:33,100 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T08:11:33,110 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T08:11:33,111 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:11:33,115 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C38061%2C1733472691507.meta, suffix=.meta, logDir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507, archiveDir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/oldWALs, maxLogs=32 2024-12-06T08:11:33,117 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C38061%2C1733472691507.meta.1733472693116.meta 2024-12-06T08:11:33,125 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.meta.1733472693116.meta 2024-12-06T08:11:33,125 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38183:38183),(127.0.0.1/127.0.0.1:35337:35337)] 2024-12-06T08:11:33,125 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:11:33,127 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T08:11:33,189 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T08:11:33,194 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-06T08:11:33,198 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T08:11:33,198 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:11:33,198 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T08:11:33,199 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T08:11:33,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:11:33,204 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:11:33,204 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:33,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:11:33,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:11:33,206 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:11:33,207 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:33,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:11:33,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:11:33,209 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:11:33,209 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:33,209 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:11:33,211 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740 2024-12-06T08:11:33,213 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740 2024-12-06T08:11:33,216 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
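The FlushLargeStoresPolicy(65) entry above falls back to memStoreFlushSize divided by the number of column families (16.0 M) because hbase:meta's descriptor carries no explicit per-family lower bound. A minimal sketch of how such a bound could be attached to a table descriptor; the property key is quoted verbatim from the log line, while the table name and the 16 MB value are illustrative assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            // Key copied from the FlushLargeStoresPolicy log line; 16 MB mirrors the fallback it reports.
            TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("someTable")) // hypothetical table
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(16L * 1024 * 1024))
                .build();
            System.out.println(td);
        }
    }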
2024-12-06T08:11:33,219 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:11:33,220 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786655, jitterRate=2.844482660293579E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:11:33,222 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:11:33,229 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733472693089 2024-12-06T08:11:33,240 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T08:11:33,241 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T08:11:33,241 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,38061,1733472691507 2024-12-06T08:11:33,243 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,38061,1733472691507, state=OPEN 2024-12-06T08:11:33,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:11:33,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:11:33,249 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:11:33,249 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:11:33,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T08:11:33,254 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=b6b797fc3981,38061,1733472691507 in 330 msec 2024-12-06T08:11:33,259 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T08:11:33,259 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 772 msec 2024-12-06T08:11:33,265 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 954 msec 2024-12-06T08:11:33,265 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733472693265, completionTime=-1 2024-12-06T08:11:33,266 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T08:11:33,266 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T08:11:33,309 DEBUG [hconnection-0x672637fe-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:11:33,311 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47382, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:11:33,322 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T08:11:33,322 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733472753322 2024-12-06T08:11:33,322 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733472813322 2024-12-06T08:11:33,322 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 55 msec 2024-12-06T08:11:33,344 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,37937,1733472690782-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:33,344 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,37937,1733472690782-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:33,344 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,37937,1733472690782-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:33,346 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b6b797fc3981:37937, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:33,346 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T08:11:33,352 DEBUG [master/b6b797fc3981:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T08:11:33,355 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-06T08:11:33,356 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:11:33,362 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T08:11:33,365 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:11:33,366 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:33,368 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:11:33,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:11:33,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:11:33,384 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d71ae187633f0d1a9491b2cc77af8371, NAME => 'hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24 2024-12-06T08:11:33,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:11:33,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:11:33,395 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:11:33,395 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing d71ae187633f0d1a9491b2cc77af8371, disabling compactions & flushes 2024-12-06T08:11:33,396 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. 
2024-12-06T08:11:33,396 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. 2024-12-06T08:11:33,396 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. after waiting 0 ms 2024-12-06T08:11:33,396 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. 2024-12-06T08:11:33,396 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. 2024-12-06T08:11:33,396 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for d71ae187633f0d1a9491b2cc77af8371: 2024-12-06T08:11:33,398 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:11:33,405 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733472693400"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733472693400"}]},"ts":"1733472693400"} 2024-12-06T08:11:33,431 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T08:11:33,433 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:11:33,436 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472693433"}]},"ts":"1733472693433"} 2024-12-06T08:11:33,440 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T08:11:33,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d71ae187633f0d1a9491b2cc77af8371, ASSIGN}] 2024-12-06T08:11:33,449 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d71ae187633f0d1a9491b2cc77af8371, ASSIGN 2024-12-06T08:11:33,451 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=d71ae187633f0d1a9491b2cc77af8371, ASSIGN; state=OFFLINE, location=b6b797fc3981,38061,1733472691507; forceNewPlan=false, retain=false 2024-12-06T08:11:33,602 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d71ae187633f0d1a9491b2cc77af8371, regionState=OPENING, regionLocation=b6b797fc3981,38061,1733472691507 2024-12-06T08:11:33,608 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; 
OpenRegionProcedure d71ae187633f0d1a9491b2cc77af8371, server=b6b797fc3981,38061,1733472691507}] 2024-12-06T08:11:33,762 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38061,1733472691507 2024-12-06T08:11:33,769 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. 2024-12-06T08:11:33,769 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => d71ae187633f0d1a9491b2cc77af8371, NAME => 'hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:11:33,770 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace d71ae187633f0d1a9491b2cc77af8371 2024-12-06T08:11:33,770 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:11:33,770 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for d71ae187633f0d1a9491b2cc77af8371 2024-12-06T08:11:33,770 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for d71ae187633f0d1a9491b2cc77af8371 2024-12-06T08:11:33,772 INFO [StoreOpener-d71ae187633f0d1a9491b2cc77af8371-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d71ae187633f0d1a9491b2cc77af8371 2024-12-06T08:11:33,775 INFO [StoreOpener-d71ae187633f0d1a9491b2cc77af8371-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d71ae187633f0d1a9491b2cc77af8371 columnFamilyName info 2024-12-06T08:11:33,776 DEBUG [StoreOpener-d71ae187633f0d1a9491b2cc77af8371-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:33,777 INFO [StoreOpener-d71ae187633f0d1a9491b2cc77af8371-1 {}] regionserver.HStore(327): Store=d71ae187633f0d1a9491b2cc77af8371/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, 
compression=NONE 2024-12-06T08:11:33,778 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/namespace/d71ae187633f0d1a9491b2cc77af8371 2024-12-06T08:11:33,779 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/namespace/d71ae187633f0d1a9491b2cc77af8371 2024-12-06T08:11:33,783 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for d71ae187633f0d1a9491b2cc77af8371 2024-12-06T08:11:33,786 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/namespace/d71ae187633f0d1a9491b2cc77af8371/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:11:33,787 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened d71ae187633f0d1a9491b2cc77af8371; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742496, jitterRate=-0.05586816370487213}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:11:33,788 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for d71ae187633f0d1a9491b2cc77af8371: 2024-12-06T08:11:33,790 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371., pid=6, masterSystemTime=1733472693762 2024-12-06T08:11:33,794 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. 2024-12-06T08:11:33,794 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. 
2024-12-06T08:11:33,795 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d71ae187633f0d1a9491b2cc77af8371, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,38061,1733472691507 2024-12-06T08:11:33,803 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T08:11:33,804 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure d71ae187633f0d1a9491b2cc77af8371, server=b6b797fc3981,38061,1733472691507 in 191 msec 2024-12-06T08:11:33,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T08:11:33,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=d71ae187633f0d1a9491b2cc77af8371, ASSIGN in 356 msec 2024-12-06T08:11:33,808 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:11:33,809 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472693808"}]},"ts":"1733472693808"} 2024-12-06T08:11:33,812 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T08:11:33,816 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:11:33,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 460 msec 2024-12-06T08:11:33,866 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T08:11:33,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:33,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:11:33,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:11:33,908 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T08:11:33,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:11:33,929 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 24 msec 2024-12-06T08:11:33,942 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T08:11:33,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:11:33,960 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 17 msec 2024-12-06T08:11:33,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T08:11:33,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T08:11:33,972 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 2.383sec 2024-12-06T08:11:33,974 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T08:11:33,975 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T08:11:33,976 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T08:11:33,977 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T08:11:33,977 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T08:11:33,978 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,37937,1733472690782-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:11:33,978 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,37937,1733472690782-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T08:11:33,985 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T08:11:33,986 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T08:11:33,986 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,37937,1733472690782-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T08:11:34,065 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08f373c4 to 127.0.0.1:58605 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59dcded7 2024-12-06T08:11:34,066 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-06T08:11:34,073 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@586ea9a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:11:34,075 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-06T08:11:34,075 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-06T08:11:34,084 DEBUG [hconnection-0x4002cb39-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:11:34,113 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47384, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:11:34,123 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b6b797fc3981,37937,1733472690782 2024-12-06T08:11:34,124 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:11:34,132 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-06T08:11:34,138 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:11:34,141 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54724, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:11:34,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37937 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T08:11:34,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37937 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
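The client-side activity above (ReadOnlyZKClient connecting to 127.0.0.1:58605, the ZKConnectionRegistry deprecation warning, the MasterService call setting balanceSwitch=false) corresponds to an ordinary HBase client session against the minicluster. A minimal sketch of an equivalent client; the ZooKeeper quorum and port come from the log, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Quorum and client port mirror the ReadOnlyZKClient line above (127.0.0.1:58605).
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "58605");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Client-side equivalent of the "set balanceSwitch=false" master RPC recorded above.
                admin.balancerSwitch(false, true);
            }
        }
    }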
2024-12-06T08:11:34,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37937 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:11:34,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37937 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-06T08:11:34,155 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:11:34,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37937 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 9 2024-12-06T08:11:34,155 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:34,157 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:11:34,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37937 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:11:34,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741837_1013 (size=389) 2024-12-06T08:11:34,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741837_1013 (size=389) 2024-12-06T08:11:34,171 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 62a856be0e3cc633d1481fac3ee088ad, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24 2024-12-06T08:11:34,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741838_1014 (size=72) 2024-12-06T08:11:34,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741838_1014 (size=72) 2024-12-06T08:11:34,182 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 
{}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:11:34,182 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing 62a856be0e3cc633d1481fac3ee088ad, disabling compactions & flushes 2024-12-06T08:11:34,182 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 2024-12-06T08:11:34,182 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 2024-12-06T08:11:34,182 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. after waiting 0 ms 2024-12-06T08:11:34,182 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 2024-12-06T08:11:34,182 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 2024-12-06T08:11:34,182 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 62a856be0e3cc633d1481fac3ee088ad: 2024-12-06T08:11:34,184 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:11:34,185 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733472694184"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733472694184"}]},"ts":"1733472694184"} 2024-12-06T08:11:34,215 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-06T08:11:34,221 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:11:34,222 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472694221"}]},"ts":"1733472694221"} 2024-12-06T08:11:34,226 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-06T08:11:34,232 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=62a856be0e3cc633d1481fac3ee088ad, ASSIGN}] 2024-12-06T08:11:34,234 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=62a856be0e3cc633d1481fac3ee088ad, ASSIGN 2024-12-06T08:11:34,236 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=62a856be0e3cc633d1481fac3ee088ad, ASSIGN; state=OFFLINE, location=b6b797fc3981,38061,1733472691507; forceNewPlan=false, retain=false 2024-12-06T08:11:34,387 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=62a856be0e3cc633d1481fac3ee088ad, regionState=OPENING, regionLocation=b6b797fc3981,38061,1733472691507 2024-12-06T08:11:34,391 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 62a856be0e3cc633d1481fac3ee088ad, server=b6b797fc3981,38061,1733472691507}] 2024-12-06T08:11:34,544 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,38061,1733472691507 2024-12-06T08:11:34,551 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 
2024-12-06T08:11:34,551 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 62a856be0e3cc633d1481fac3ee088ad, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:11:34,552 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 62a856be0e3cc633d1481fac3ee088ad 2024-12-06T08:11:34,552 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:11:34,552 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 62a856be0e3cc633d1481fac3ee088ad 2024-12-06T08:11:34,552 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 62a856be0e3cc633d1481fac3ee088ad 2024-12-06T08:11:34,554 INFO [StoreOpener-62a856be0e3cc633d1481fac3ee088ad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 62a856be0e3cc633d1481fac3ee088ad 2024-12-06T08:11:34,556 INFO [StoreOpener-62a856be0e3cc633d1481fac3ee088ad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 62a856be0e3cc633d1481fac3ee088ad columnFamilyName info 2024-12-06T08:11:34,557 DEBUG [StoreOpener-62a856be0e3cc633d1481fac3ee088ad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:11:34,558 INFO [StoreOpener-62a856be0e3cc633d1481fac3ee088ad-1 {}] regionserver.HStore(327): Store=62a856be0e3cc633d1481fac3ee088ad/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:11:34,559 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad 2024-12-06T08:11:34,559 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] 
regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad 2024-12-06T08:11:34,563 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 62a856be0e3cc633d1481fac3ee088ad 2024-12-06T08:11:34,566 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:11:34,567 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 62a856be0e3cc633d1481fac3ee088ad; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=763441, jitterRate=-0.02923545241355896}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:11:34,568 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 62a856be0e3cc633d1481fac3ee088ad: 2024-12-06T08:11:34,569 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad., pid=11, masterSystemTime=1733472694544 2024-12-06T08:11:34,572 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 2024-12-06T08:11:34,572 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 
2024-12-06T08:11:34,573 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=62a856be0e3cc633d1481fac3ee088ad, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,38061,1733472691507 2024-12-06T08:11:34,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-06T08:11:34,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 62a856be0e3cc633d1481fac3ee088ad, server=b6b797fc3981,38061,1733472691507 in 185 msec 2024-12-06T08:11:34,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T08:11:34,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=62a856be0e3cc633d1481fac3ee088ad, ASSIGN in 347 msec 2024-12-06T08:11:34,584 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:11:34,584 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472694584"}]},"ts":"1733472694584"} 2024-12-06T08:11:34,586 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-06T08:11:34,590 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:11:34,593 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 439 msec 2024-12-06T08:11:38,702 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-06T08:11:38,742 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T08:11:38,743 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-06T08:11:38,743 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-06T08:11:41,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-06T08:11:41,275 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-06T08:11:41,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-06T08:11:41,277 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-06T08:11:41,277 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-06T08:11:41,278 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-06T08:11:41,278 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T08:11:41,278 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-06T08:11:41,279 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-06T08:11:41,279 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-06T08:11:44,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37937 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:11:44,168 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling, procId: 9 completed 2024-12-06T08:11:44,173 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-06T08:11:44,174 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 
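The CreateTableProcedure sequence above ends with "Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling, procId: 9 completed". A minimal client-side sketch of an equivalent create call, using the column family attributes shown in the HMaster create entry and the deliberately small MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) that TableDescriptorChecker warned about earlier; this mirrors the log rather than the test's actual source:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        static void createTable(Connection connection) throws Exception {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)        // VERSIONS => '1'
                    .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
                    .build())
                .setMaxFileSize(786432)       // MAX_FILESIZE value warned about by TableDescriptorChecker
                .setMemStoreFlushSize(8192)   // MEMSTORE_FLUSHSIZE value warned about by TableDescriptorChecker
                .build();
            try (Admin admin = connection.getAdmin()) {
                admin.createTable(td);        // recorded as pid=9 CreateTableProcedure in the log
            }
        }
    }

A Connection such as the one built in the earlier sketch would be passed in; the small file and flush sizes are what force frequent flushes and rolls in this log-rolling test.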
2024-12-06T08:11:44,175 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C38061%2C1733472691507.1733472704175 2024-12-06T08:11:44,194 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472692729 with entries=4, filesize=947 B; new WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472704175 2024-12-06T08:11:44,196 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337),(127.0.0.1/127.0.0.1:38183:38183)] 2024-12-06T08:11:44,196 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472692729 is not closed yet, will try archiving it next time 2024-12-06T08:11:44,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741833_1009 (size=955) 2024-12-06T08:11:44,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741833_1009 (size=955) 2024-12-06T08:11:56,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38061 {}] regionserver.HRegion(8581): Flush requested on 62a856be0e3cc633d1481fac3ee088ad 2024-12-06T08:11:56,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62a856be0e3cc633d1481fac3ee088ad 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:11:56,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/fd06ffec19c5476281d10688b0ce453d is 1080, key is row0001/info:/1733472704211/Put/seqid=0 2024-12-06T08:11:56,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741840_1016 (size=12509) 2024-12-06T08:11:56,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741840_1016 (size=12509) 2024-12-06T08:11:56,313 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/fd06ffec19c5476281d10688b0ce453d 2024-12-06T08:11:56,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/fd06ffec19c5476281d10688b0ce453d as hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/fd06ffec19c5476281d10688b0ce453d 2024-12-06T08:11:56,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/fd06ffec19c5476281d10688b0ce453d, entries=7, sequenceid=11, filesize=12.2 K 2024-12-06T08:11:56,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 62a856be0e3cc633d1481fac3ee088ad in 137ms, sequenceid=11, compaction requested=false 2024-12-06T08:11:56,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62a856be0e3cc633d1481fac3ee088ad: 2024-12-06T08:11:59,885 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:12:02,385 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:12:02,387 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41904, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:12:04,247 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C38061%2C1733472691507.1733472724247 2024-12-06T08:12:04,456 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:04,457 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472704175 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472724247 2024-12-06T08:12:04,457 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337),(127.0.0.1/127.0.0.1:38183:38183)] 2024-12-06T08:12:04,458 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472704175 is not closed yet, will try archiving it next time 2024-12-06T08:12:04,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741839_1015 (size=12399) 2024-12-06T08:12:04,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741839_1015 (size=12399) 2024-12-06T08:12:04,660 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:06,864 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], 
DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:09,068 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:11,271 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:11,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38061 {}] regionserver.HRegion(8581): Flush requested on 62a856be0e3cc633d1481fac3ee088ad 2024-12-06T08:12:11,272 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62a856be0e3cc633d1481fac3ee088ad 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:12:11,474 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:11,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/9888d1dbbb9a4df29c92582be88925fc is 1080, key is row0008/info:/1733472718238/Put/seqid=0 2024-12-06T08:12:11,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741842_1018 (size=12509) 2024-12-06T08:12:11,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741842_1018 (size=12509) 2024-12-06T08:12:11,489 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/9888d1dbbb9a4df29c92582be88925fc 2024-12-06T08:12:11,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/9888d1dbbb9a4df29c92582be88925fc as hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/9888d1dbbb9a4df29c92582be88925fc 2024-12-06T08:12:11,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/9888d1dbbb9a4df29c92582be88925fc, entries=7, sequenceid=21, filesize=12.2 K 2024-12-06T08:12:11,712 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 
2024-12-06T08:12:11,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 62a856be0e3cc633d1481fac3ee088ad in 440ms, sequenceid=21, compaction requested=false 2024-12-06T08:12:11,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62a856be0e3cc633d1481fac3ee088ad: 2024-12-06T08:12:11,713 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=24.4 K, sizeToCheck=16.0 K 2024-12-06T08:12:11,713 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T08:12:11,714 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/fd06ffec19c5476281d10688b0ce453d because midkey is the same as first or last row 2024-12-06T08:12:13,475 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:13,989 INFO [master/b6b797fc3981:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T08:12:13,989 INFO [master/b6b797fc3981:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-06T08:12:15,678 WARN [sync.1 {}] wal.AbstractFSWAL(1346): Requesting log roll because we exceeded slow sync threshold; count=7, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:15,679 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C38061%2C1733472691507:(num 1733472724247) roll requested 2024-12-06T08:12:15,680 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:15,680 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C38061%2C1733472691507.1733472735680 2024-12-06T08:12:15,889 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:16,090 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:16,091 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472724247 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472735680 2024-12-06T08:12:16,091 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38183:38183),(127.0.0.1/127.0.0.1:35337:35337)] 2024-12-06T08:12:16,091 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472724247 is not closed yet, will try archiving it next time 2024-12-06T08:12:16,093 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472704175 to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/oldWALs/b6b797fc3981%2C38061%2C1733472691507.1733472704175 2024-12-06T08:12:16,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741841_1017 (size=7739) 2024-12-06T08:12:16,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741841_1017 (size=7739) 2024-12-06T08:12:17,882 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK], DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK]] 2024-12-06T08:12:19,552 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 62a856be0e3cc633d1481fac3ee088ad, had cached 0 bytes from a total of 25018 2024-12-06T08:12:20,086 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK], DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK]] 2024-12-06T08:12:22,289 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK], DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK]] 2024-12-06T08:12:24,493 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK], DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK]] 2024-12-06T08:12:26,495 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T08:12:26,496 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C38061%2C1733472691507.1733472746496 2024-12-06T08:12:29,886 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-06T08:12:31,512 INFO [Time-limited test {}] wal.AbstractFSWAL(1183): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK], DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK]] 2024-12-06T08:12:31,512 WARN [Time-limited test {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK], DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK]] 2024-12-06T08:12:31,512 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C38061%2C1733472691507:(num 1733472746496) roll requested 2024-12-06T08:12:33,382 DEBUG [master/b6b797fc3981:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region d71ae187633f0d1a9491b2cc77af8371 changed from -1.0 to 0.0, refreshing cache 2024-12-06T08:12:36,512 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK], DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK]] 2024-12-06T08:12:36,513 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK], DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK]] 2024-12-06T08:12:36,513 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472735680 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472746496 2024-12-06T08:12:36,513 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337),(127.0.0.1/127.0.0.1:38183:38183)] 2024-12-06T08:12:36,513 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472735680 is not closed yet, will try archiving it next time 2024-12-06T08:12:36,514 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C38061%2C1733472691507.1733472756514 2024-12-06T08:12:36,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741843_1019 (size=4753) 2024-12-06T08:12:36,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741843_1019 (size=4753) 2024-12-06T08:12:41,516 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:41,516 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, 
threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:41,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38061 {}] regionserver.HRegion(8581): Flush requested on 62a856be0e3cc633d1481fac3ee088ad 2024-12-06T08:12:41,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62a856be0e3cc633d1481fac3ee088ad 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:12:41,523 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:41,523 WARN [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:43,517 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T08:12:46,519 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:46,519 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:46,523 INFO [sync.0 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:46,523 WARN [sync.0 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:46,524 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472746496 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472756514 2024-12-06T08:12:46,525 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337),(127.0.0.1/127.0.0.1:38183:38183)] 2024-12-06T08:12:46,525 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): 
hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472746496 is not closed yet, will try archiving it next time 2024-12-06T08:12:46,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/a64b5fa1956d4731b75ce0b7056a493f is 1080, key is row0015/info:/1733472733273/Put/seqid=0 2024-12-06T08:12:46,525 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C38061%2C1733472691507:(num 1733472756514) roll requested 2024-12-06T08:12:46,525 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C38061%2C1733472691507.1733472766525 2024-12-06T08:12:46,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741844_1020 (size=1569) 2024-12-06T08:12:46,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741844_1020 (size=1569) 2024-12-06T08:12:46,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741846_1022 (size=12509) 2024-12-06T08:12:46,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741846_1022 (size=12509) 2024-12-06T08:12:46,537 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/a64b5fa1956d4731b75ce0b7056a493f 2024-12-06T08:12:46,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/a64b5fa1956d4731b75ce0b7056a493f as hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/a64b5fa1956d4731b75ce0b7056a493f 2024-12-06T08:12:46,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/a64b5fa1956d4731b75ce0b7056a493f, entries=7, sequenceid=31, filesize=12.2 K 2024-12-06T08:12:51,534 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(1183): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:51,534 WARN [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], 
DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:51,560 INFO [sync.1 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:51,560 WARN [sync.1 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:51,561 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 62a856be0e3cc633d1481fac3ee088ad in 10043ms, sequenceid=31, compaction requested=true 2024-12-06T08:12:51,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62a856be0e3cc633d1481fac3ee088ad: 2024-12-06T08:12:51,561 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=36.6 K, sizeToCheck=16.0 K 2024-12-06T08:12:51,561 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T08:12:51,561 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/fd06ffec19c5476281d10688b0ce453d because midkey is the same as first or last row 2024-12-06T08:12:51,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 62a856be0e3cc633d1481fac3ee088ad:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:12:51,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:12:51,564 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:12:51,567 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:12:51,568 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] regionserver.HStore(1540): 62a856be0e3cc633d1481fac3ee088ad/info is initiating minor compaction (all files) 2024-12-06T08:12:51,568 INFO [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 62a856be0e3cc633d1481fac3ee088ad/info in TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 
2024-12-06T08:12:51,568 INFO [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/fd06ffec19c5476281d10688b0ce453d, hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/9888d1dbbb9a4df29c92582be88925fc, hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/a64b5fa1956d4731b75ce0b7056a493f] into tmpdir=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp, totalSize=36.6 K 2024-12-06T08:12:51,570 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd06ffec19c5476281d10688b0ce453d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733472704211 2024-12-06T08:12:51,570 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9888d1dbbb9a4df29c92582be88925fc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733472718238 2024-12-06T08:12:51,571 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] compactions.Compactor(224): Compacting a64b5fa1956d4731b75ce0b7056a493f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733472733273 2024-12-06T08:12:51,601 INFO [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 62a856be0e3cc633d1481fac3ee088ad#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:12:51,602 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/4bb4cb54b20c4ff4af934d9dcf26e228 is 1080, key is row0001/info:/1733472704211/Put/seqid=0 2024-12-06T08:12:51,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741848_1024 (size=27710) 2024-12-06T08:12:51,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741848_1024 (size=27710) 2024-12-06T08:12:51,619 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/4bb4cb54b20c4ff4af934d9dcf26e228 as hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/4bb4cb54b20c4ff4af934d9dcf26e228 2024-12-06T08:12:56,534 INFO [sync.2 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:56,534 WARN [sync.2 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:12:56,535 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472756514 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472766525 2024-12-06T08:12:56,535 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337),(127.0.0.1/127.0.0.1:38183:38183)] 2024-12-06T08:12:56,535 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472756514 is not closed yet, will try archiving it next time 2024-12-06T08:12:56,535 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472724247 to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/oldWALs/b6b797fc3981%2C38061%2C1733472691507.1733472724247 2024-12-06T08:12:56,535 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C38061%2C1733472691507:(num 1733472766525) roll requested 
2024-12-06T08:12:56,535 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C38061%2C1733472691507.1733472776535 2024-12-06T08:12:56,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741845_1021 (size=438) 2024-12-06T08:12:56,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741845_1021 (size=438) 2024-12-06T08:12:56,538 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472735680 to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/oldWALs/b6b797fc3981%2C38061%2C1733472691507.1733472735680 2024-12-06T08:12:56,539 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472746496 to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/oldWALs/b6b797fc3981%2C38061%2C1733472691507.1733472746496 2024-12-06T08:12:56,541 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472756514 to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/oldWALs/b6b797fc3981%2C38061%2C1733472691507.1733472756514 2024-12-06T08:12:59,886 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:13:01,536 INFO [sync.3 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:13:01,536 WARN [sync.3 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:13:01,538 INFO [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 62a856be0e3cc633d1481fac3ee088ad/info of 62a856be0e3cc633d1481fac3ee088ad into 4bb4cb54b20c4ff4af934d9dcf26e228(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 9sec to execute. 
2024-12-06T08:13:01,538 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 62a856be0e3cc633d1481fac3ee088ad: 2024-12-06T08:13:01,538 INFO [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad., storeName=62a856be0e3cc633d1481fac3ee088ad/info, priority=13, startTime=1733472771563; duration=9sec 2024-12-06T08:13:01,539 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=27.1 K, sizeToCheck=16.0 K 2024-12-06T08:13:01,539 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T08:13:01,539 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/4bb4cb54b20c4ff4af934d9dcf26e228 because midkey is the same as first or last row 2024-12-06T08:13:01,539 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:13:01,539 DEBUG [RS:0;b6b797fc3981:38061-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 62a856be0e3cc633d1481fac3ee088ad:info 2024-12-06T08:13:01,544 INFO [sync.4 {}] wal.AbstractFSWAL(1183): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:13:01,544 WARN [sync.4 {}] wal.AbstractFSWAL(1189): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43367,DS-8885ca9b-c78a-4414-8ef6-c2bece9f9779,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-ef306e84-a195-4a3f-96d1-d8ae12675401,DISK]] 2024-12-06T08:13:01,545 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472766525 with entries=1, filesize=531 B; new WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472776535 2024-12-06T08:13:01,545 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337),(127.0.0.1/127.0.0.1:38183:38183)] 2024-12-06T08:13:01,545 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472766525 is not closed yet, will try archiving it next time 2024-12-06T08:13:01,545 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C38061%2C1733472691507.1733472781545 2024-12-06T08:13:01,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741847_1023 (size=539) 
2024-12-06T08:13:01,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741847_1023 (size=539) 2024-12-06T08:13:01,548 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472766525 to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/oldWALs/b6b797fc3981%2C38061%2C1733472691507.1733472766525 2024-12-06T08:13:01,558 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472776535 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472781545 2024-12-06T08:13:01,559 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337),(127.0.0.1/127.0.0.1:38183:38183)] 2024-12-06T08:13:01,559 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472776535 is not closed yet, will try archiving it next time 2024-12-06T08:13:01,559 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C38061%2C1733472691507:(num 1733472781545) roll requested 2024-12-06T08:13:01,559 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C38061%2C1733472691507.1733472781559 2024-12-06T08:13:01,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741849_1025 (size=1258) 2024-12-06T08:13:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741849_1025 (size=1258) 2024-12-06T08:13:01,567 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472781545 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472781559 2024-12-06T08:13:01,567 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35337:35337),(127.0.0.1/127.0.0.1:38183:38183)] 2024-12-06T08:13:01,567 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472781545 is not closed yet, will try archiving it next time 2024-12-06T08:13:01,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741850_1026 (size=93) 2024-12-06T08:13:01,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added 
to blk_1073741850_1026 (size=93) 2024-12-06T08:13:01,569 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507/b6b797fc3981%2C38061%2C1733472691507.1733472781545 to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/oldWALs/b6b797fc3981%2C38061%2C1733472691507.1733472781545 2024-12-06T08:13:04,553 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 62a856be0e3cc633d1481fac3ee088ad, had cached 0 bytes from a total of 27710 2024-12-06T08:13:13,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38061 {}] regionserver.HRegion(8581): Flush requested on 62a856be0e3cc633d1481fac3ee088ad 2024-12-06T08:13:13,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 62a856be0e3cc633d1481fac3ee088ad 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:13:13,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/a4377ac394ba4491a29cfc117cd0f2fc is 1080, key is row0022/info:/1733472781546/Put/seqid=0 2024-12-06T08:13:13,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741852_1028 (size=12509) 2024-12-06T08:13:13,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741852_1028 (size=12509) 2024-12-06T08:13:13,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/a4377ac394ba4491a29cfc117cd0f2fc 2024-12-06T08:13:13,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/a4377ac394ba4491a29cfc117cd0f2fc as hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/a4377ac394ba4491a29cfc117cd0f2fc 2024-12-06T08:13:13,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/a4377ac394ba4491a29cfc117cd0f2fc, entries=7, sequenceid=42, filesize=12.2 K 2024-12-06T08:13:13,601 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 62a856be0e3cc633d1481fac3ee088ad in 35ms, sequenceid=42, compaction requested=false 2024-12-06T08:13:13,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 62a856be0e3cc633d1481fac3ee088ad: 2024-12-06T08:13:13,601 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=39.3 K, sizeToCheck=16.0 K 2024-12-06T08:13:13,601 DEBUG 
[MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T08:13:13,601 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/4bb4cb54b20c4ff4af934d9dcf26e228 because midkey is the same as first or last row 2024-12-06T08:13:21,575 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T08:13:21,575 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T08:13:21,575 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08f373c4 to 127.0.0.1:58605 2024-12-06T08:13:21,575 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:13:21,576 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T08:13:21,576 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1928901483, stopped=false 2024-12-06T08:13:21,576 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b6b797fc3981,37937,1733472690782 2024-12-06T08:13:21,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:13:21,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:13:21,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:21,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:21,578 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T08:13:21,579 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:13:21,579 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,38061,1733472691507' ***** 2024-12-06T08:13:21,579 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T08:13:21,579 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:13:21,579 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:13:21,579 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:13:21,579 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T08:13:21,580 INFO [RS:0;b6b797fc3981:38061 {}] 
flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:13:21,580 INFO [RS:0;b6b797fc3981:38061 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T08:13:21,580 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(3579): Received CLOSE for 62a856be0e3cc633d1481fac3ee088ad 2024-12-06T08:13:21,581 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(3579): Received CLOSE for d71ae187633f0d1a9491b2cc77af8371 2024-12-06T08:13:21,581 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,38061,1733472691507 2024-12-06T08:13:21,581 DEBUG [RS:0;b6b797fc3981:38061 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:13:21,581 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:13:21,581 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:13:21,581 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T08:13:21,581 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T08:13:21,581 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 62a856be0e3cc633d1481fac3ee088ad, disabling compactions & flushes 2024-12-06T08:13:21,581 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 2024-12-06T08:13:21,581 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 2024-12-06T08:13:21,581 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. after waiting 0 ms 2024-12-06T08:13:21,581 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 
2024-12-06T08:13:21,581 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-06T08:13:21,581 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 62a856be0e3cc633d1481fac3ee088ad 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-06T08:13:21,582 DEBUG [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1603): Online Regions={62a856be0e3cc633d1481fac3ee088ad=TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad., 1588230740=hbase:meta,,1.1588230740, d71ae187633f0d1a9491b2cc77af8371=hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371.} 2024-12-06T08:13:21,582 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:13:21,582 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:13:21,582 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:13:21,582 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:13:21,582 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:13:21,582 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.81 KB heapSize=5.32 KB 2024-12-06T08:13:21,582 DEBUG [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 62a856be0e3cc633d1481fac3ee088ad, d71ae187633f0d1a9491b2cc77af8371 2024-12-06T08:13:21,588 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/a07b488a3b8e4e91be9cdd14b5cf240b is 1080, key is row0029/info:/1733472795567/Put/seqid=0 2024-12-06T08:13:21,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741853_1029 (size=8193) 2024-12-06T08:13:21,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741853_1029 (size=8193) 2024-12-06T08:13:21,604 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740/.tmp/info/d7bb0f7edeb64b86be29b553f7a8064c is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad./info:regioninfo/1733472694573/Put/seqid=0 2024-12-06T08:13:21,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741854_1030 (size=8172) 2024-12-06T08:13:21,610 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741854_1030 (size=8172) 2024-12-06T08:13:21,611 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.59 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740/.tmp/info/d7bb0f7edeb64b86be29b553f7a8064c 2024-12-06T08:13:21,611 INFO [regionserver/b6b797fc3981:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-06T08:13:21,611 INFO [regionserver/b6b797fc3981:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-06T08:13:21,637 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740/.tmp/table/cf123269b07142b6bfbe6fd86e45320c is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733472694584/Put/seqid=0 2024-12-06T08:13:21,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741855_1031 (size=5452) 2024-12-06T08:13:21,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741855_1031 (size=5452) 2024-12-06T08:13:21,644 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=232 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740/.tmp/table/cf123269b07142b6bfbe6fd86e45320c 2024-12-06T08:13:21,652 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740/.tmp/info/d7bb0f7edeb64b86be29b553f7a8064c as hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740/info/d7bb0f7edeb64b86be29b553f7a8064c 2024-12-06T08:13:21,661 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740/info/d7bb0f7edeb64b86be29b553f7a8064c, entries=20, sequenceid=14, filesize=8.0 K 2024-12-06T08:13:21,662 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740/.tmp/table/cf123269b07142b6bfbe6fd86e45320c as hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740/table/cf123269b07142b6bfbe6fd86e45320c 2024-12-06T08:13:21,670 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740/table/cf123269b07142b6bfbe6fd86e45320c, entries=4, sequenceid=14, filesize=5.3 K 2024-12-06T08:13:21,671 INFO 
[RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.81 KB/2882, heapSize ~5.04 KB/5160, currentSize=0 B/0 for 1588230740 in 89ms, sequenceid=14, compaction requested=false 2024-12-06T08:13:21,678 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1 2024-12-06T08:13:21,680 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T08:13:21,681 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T08:13:21,681 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:13:21,681 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T08:13:21,782 DEBUG [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1629): Waiting on 62a856be0e3cc633d1481fac3ee088ad, d71ae187633f0d1a9491b2cc77af8371 2024-12-06T08:13:21,983 DEBUG [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1629): Waiting on 62a856be0e3cc633d1481fac3ee088ad, d71ae187633f0d1a9491b2cc77af8371 2024-12-06T08:13:21,996 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/a07b488a3b8e4e91be9cdd14b5cf240b 2024-12-06T08:13:22,005 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/.tmp/info/a07b488a3b8e4e91be9cdd14b5cf240b as hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/a07b488a3b8e4e91be9cdd14b5cf240b 2024-12-06T08:13:22,012 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/a07b488a3b8e4e91be9cdd14b5cf240b, entries=3, sequenceid=48, filesize=8.0 K 2024-12-06T08:13:22,013 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 62a856be0e3cc633d1481fac3ee088ad in 432ms, sequenceid=48, compaction requested=true 2024-12-06T08:13:22,014 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/fd06ffec19c5476281d10688b0ce453d, hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/9888d1dbbb9a4df29c92582be88925fc, hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/a64b5fa1956d4731b75ce0b7056a493f] to archive 2024-12-06T08:13:22,017 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T08:13:22,020 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/fd06ffec19c5476281d10688b0ce453d to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/archive/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/fd06ffec19c5476281d10688b0ce453d 2024-12-06T08:13:22,021 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/9888d1dbbb9a4df29c92582be88925fc to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/archive/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/9888d1dbbb9a4df29c92582be88925fc 2024-12-06T08:13:22,023 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/a64b5fa1956d4731b75ce0b7056a493f to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/archive/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/info/a64b5fa1956d4731b75ce0b7056a493f 2024-12-06T08:13:22,044 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/default/TestLogRolling-testSlowSyncLogRolling/62a856be0e3cc633d1481fac3ee088ad/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-06T08:13:22,044 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 
2024-12-06T08:13:22,045 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 62a856be0e3cc633d1481fac3ee088ad: 2024-12-06T08:13:22,045 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733472694147.62a856be0e3cc633d1481fac3ee088ad. 2024-12-06T08:13:22,045 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing d71ae187633f0d1a9491b2cc77af8371, disabling compactions & flushes 2024-12-06T08:13:22,045 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. 2024-12-06T08:13:22,045 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. 2024-12-06T08:13:22,045 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. after waiting 0 ms 2024-12-06T08:13:22,045 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. 2024-12-06T08:13:22,045 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing d71ae187633f0d1a9491b2cc77af8371 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T08:13:22,062 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/namespace/d71ae187633f0d1a9491b2cc77af8371/.tmp/info/cbe5c384ecdb4f13bb6e44794012f6f8 is 45, key is default/info:d/1733472693917/Put/seqid=0 2024-12-06T08:13:22,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741856_1032 (size=5037) 2024-12-06T08:13:22,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741856_1032 (size=5037) 2024-12-06T08:13:22,070 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/namespace/d71ae187633f0d1a9491b2cc77af8371/.tmp/info/cbe5c384ecdb4f13bb6e44794012f6f8 2024-12-06T08:13:22,077 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/namespace/d71ae187633f0d1a9491b2cc77af8371/.tmp/info/cbe5c384ecdb4f13bb6e44794012f6f8 as hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/namespace/d71ae187633f0d1a9491b2cc77af8371/info/cbe5c384ecdb4f13bb6e44794012f6f8 2024-12-06T08:13:22,084 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/namespace/d71ae187633f0d1a9491b2cc77af8371/info/cbe5c384ecdb4f13bb6e44794012f6f8, entries=2, sequenceid=6, filesize=4.9 K 2024-12-06T08:13:22,085 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for d71ae187633f0d1a9491b2cc77af8371 in 40ms, sequenceid=6, compaction requested=false 2024-12-06T08:13:22,089 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/data/hbase/namespace/d71ae187633f0d1a9491b2cc77af8371/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T08:13:22,090 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. 2024-12-06T08:13:22,090 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for d71ae187633f0d1a9491b2cc77af8371: 2024-12-06T08:13:22,090 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733472693355.d71ae187633f0d1a9491b2cc77af8371. 2024-12-06T08:13:22,183 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,38061,1733472691507; all regions closed. 2024-12-06T08:13:22,184 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507 2024-12-06T08:13:22,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741834_1010 (size=4330) 2024-12-06T08:13:22,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741834_1010 (size=4330) 2024-12-06T08:13:22,190 DEBUG [RS:0;b6b797fc3981:38061 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/oldWALs 2024-12-06T08:13:22,190 INFO [RS:0;b6b797fc3981:38061 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b6b797fc3981%2C38061%2C1733472691507.meta:.meta(num 1733472693116) 2024-12-06T08:13:22,190 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/WALs/b6b797fc3981,38061,1733472691507 2024-12-06T08:13:22,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741851_1027 (size=13066) 2024-12-06T08:13:22,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741851_1027 (size=13066) 2024-12-06T08:13:22,197 DEBUG [RS:0;b6b797fc3981:38061 {}] wal.AbstractFSWAL(1071): Moved 3 WAL file(s) to /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/oldWALs 2024-12-06T08:13:22,197 INFO [RS:0;b6b797fc3981:38061 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b6b797fc3981%2C38061%2C1733472691507:(num 1733472781559) 2024-12-06T08:13:22,197 DEBUG [RS:0;b6b797fc3981:38061 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:13:22,197 INFO [RS:0;b6b797fc3981:38061 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:13:22,198 INFO [RS:0;b6b797fc3981:38061 {}] hbase.ChoreService(370): Chore service for: regionserver/b6b797fc3981:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T08:13:22,198 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-06T08:13:22,198 INFO [RS:0;b6b797fc3981:38061 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38061 2024-12-06T08:13:22,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b6b797fc3981,38061,1733472691507 2024-12-06T08:13:22,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:13:22,203 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b6b797fc3981,38061,1733472691507] 2024-12-06T08:13:22,204 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b6b797fc3981,38061,1733472691507; numProcessing=1 2024-12-06T08:13:22,205 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b6b797fc3981,38061,1733472691507 already deleted, retry=false 2024-12-06T08:13:22,205 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b6b797fc3981,38061,1733472691507 expired; onlineServers=0 2024-12-06T08:13:22,205 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,37937,1733472690782' ***** 2024-12-06T08:13:22,205 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T08:13:22,205 DEBUG [M:0;b6b797fc3981:37937 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37932fdd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:13:22,205 INFO [M:0;b6b797fc3981:37937 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,37937,1733472690782 2024-12-06T08:13:22,205 INFO [M:0;b6b797fc3981:37937 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,37937,1733472690782; all regions closed. 2024-12-06T08:13:22,206 DEBUG [M:0;b6b797fc3981:37937 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:13:22,206 DEBUG [M:0;b6b797fc3981:37937 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T08:13:22,206 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T08:13:22,206 DEBUG [M:0;b6b797fc3981:37937 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T08:13:22,206 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472692412 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472692412,5,FailOnTimeoutGroup] 2024-12-06T08:13:22,206 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472692408 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472692408,5,FailOnTimeoutGroup] 2024-12-06T08:13:22,206 INFO [M:0;b6b797fc3981:37937 {}] hbase.ChoreService(370): Chore service for: master/b6b797fc3981:0 had [] on shutdown 2024-12-06T08:13:22,206 DEBUG [M:0;b6b797fc3981:37937 {}] master.HMaster(1733): Stopping service threads 2024-12-06T08:13:22,206 INFO [M:0;b6b797fc3981:37937 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T08:13:22,207 INFO [M:0;b6b797fc3981:37937 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T08:13:22,207 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T08:13:22,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T08:13:22,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:22,207 DEBUG [M:0;b6b797fc3981:37937 {}] zookeeper.ZKUtil(347): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T08:13:22,207 WARN [M:0;b6b797fc3981:37937 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T08:13:22,207 INFO [M:0;b6b797fc3981:37937 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T08:13:22,208 INFO [M:0;b6b797fc3981:37937 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T08:13:22,208 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:13:22,208 DEBUG [M:0;b6b797fc3981:37937 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:13:22,208 INFO [M:0;b6b797fc3981:37937 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:13:22,208 DEBUG [M:0;b6b797fc3981:37937 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:13:22,208 DEBUG [M:0;b6b797fc3981:37937 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-06T08:13:22,208 DEBUG [M:0;b6b797fc3981:37937 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:13:22,208 INFO [M:0;b6b797fc3981:37937 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.22 KB heapSize=50.15 KB 2024-12-06T08:13:22,231 DEBUG [M:0;b6b797fc3981:37937 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1fc7c98335ae4a54861670e65a973e96 is 82, key is hbase:meta,,1/info:regioninfo/1733472693241/Put/seqid=0 2024-12-06T08:13:22,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741857_1033 (size=5672) 2024-12-06T08:13:22,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741857_1033 (size=5672) 2024-12-06T08:13:22,238 INFO [M:0;b6b797fc3981:37937 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1fc7c98335ae4a54861670e65a973e96 2024-12-06T08:13:22,269 DEBUG [M:0;b6b797fc3981:37937 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/20f0f4d1b937407d932371246d7d7644 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733472694592/Put/seqid=0 2024-12-06T08:13:22,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741858_1034 (size=6427) 2024-12-06T08:13:22,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741858_1034 (size=6427) 2024-12-06T08:13:22,275 INFO [M:0;b6b797fc3981:37937 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.62 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/20f0f4d1b937407d932371246d7d7644 2024-12-06T08:13:22,281 INFO [M:0;b6b797fc3981:37937 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 20f0f4d1b937407d932371246d7d7644 2024-12-06T08:13:22,297 DEBUG [M:0;b6b797fc3981:37937 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d957bc4812b94d53a9d1e5b00d090157 is 69, key is b6b797fc3981,38061,1733472691507/rs:state/1733472692488/Put/seqid=0 2024-12-06T08:13:22,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741859_1035 (size=5156) 2024-12-06T08:13:22,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:13:22,304 INFO 
[RS:0;b6b797fc3981:38061 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,38061,1733472691507; zookeeper connection closed. 2024-12-06T08:13:22,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741859_1035 (size=5156) 2024-12-06T08:13:22,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38061-0x1006661d8030001, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:13:22,304 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1aef670b {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1aef670b 2024-12-06T08:13:22,304 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T08:13:22,305 INFO [M:0;b6b797fc3981:37937 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d957bc4812b94d53a9d1e5b00d090157 2024-12-06T08:13:22,327 DEBUG [M:0;b6b797fc3981:37937 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a0a53dcf112945b6aa50fdeccd4b2c3b is 52, key is load_balancer_on/state:d/1733472694129/Put/seqid=0 2024-12-06T08:13:22,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741860_1036 (size=5056) 2024-12-06T08:13:22,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741860_1036 (size=5056) 2024-12-06T08:13:22,334 INFO [M:0;b6b797fc3981:37937 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a0a53dcf112945b6aa50fdeccd4b2c3b 2024-12-06T08:13:22,342 DEBUG [M:0;b6b797fc3981:37937 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1fc7c98335ae4a54861670e65a973e96 as hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1fc7c98335ae4a54861670e65a973e96 2024-12-06T08:13:22,349 INFO [M:0;b6b797fc3981:37937 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1fc7c98335ae4a54861670e65a973e96, entries=8, sequenceid=104, filesize=5.5 K 2024-12-06T08:13:22,351 DEBUG [M:0;b6b797fc3981:37937 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/20f0f4d1b937407d932371246d7d7644 as 
hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/20f0f4d1b937407d932371246d7d7644 2024-12-06T08:13:22,357 INFO [M:0;b6b797fc3981:37937 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 20f0f4d1b937407d932371246d7d7644 2024-12-06T08:13:22,358 INFO [M:0;b6b797fc3981:37937 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/20f0f4d1b937407d932371246d7d7644, entries=11, sequenceid=104, filesize=6.3 K 2024-12-06T08:13:22,359 DEBUG [M:0;b6b797fc3981:37937 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d957bc4812b94d53a9d1e5b00d090157 as hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d957bc4812b94d53a9d1e5b00d090157 2024-12-06T08:13:22,366 INFO [M:0;b6b797fc3981:37937 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d957bc4812b94d53a9d1e5b00d090157, entries=1, sequenceid=104, filesize=5.0 K 2024-12-06T08:13:22,367 DEBUG [M:0;b6b797fc3981:37937 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a0a53dcf112945b6aa50fdeccd4b2c3b as hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a0a53dcf112945b6aa50fdeccd4b2c3b 2024-12-06T08:13:22,374 INFO [M:0;b6b797fc3981:37937 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a0a53dcf112945b6aa50fdeccd4b2c3b, entries=1, sequenceid=104, filesize=4.9 K 2024-12-06T08:13:22,375 INFO [M:0;b6b797fc3981:37937 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.22 KB/41185, heapSize ~50.09 KB/51288, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=104, compaction requested=false 2024-12-06T08:13:22,377 INFO [M:0;b6b797fc3981:37937 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:13:22,377 DEBUG [M:0;b6b797fc3981:37937 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:13:22,378 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/MasterData/WALs/b6b797fc3981,37937,1733472690782 2024-12-06T08:13:22,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741830_1006 (size=48486) 2024-12-06T08:13:22,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43367 is added to blk_1073741830_1006 (size=48486) 2024-12-06T08:13:22,381 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T08:13:22,381 INFO [M:0;b6b797fc3981:37937 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-06T08:13:22,381 INFO [M:0;b6b797fc3981:37937 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:37937 2024-12-06T08:13:22,382 DEBUG [M:0;b6b797fc3981:37937 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b6b797fc3981,37937,1733472690782 already deleted, retry=false 2024-12-06T08:13:22,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:13:22,484 INFO [M:0;b6b797fc3981:37937 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,37937,1733472690782; zookeeper connection closed. 2024-12-06T08:13:22,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37937-0x1006661d8030000, quorum=127.0.0.1:58605, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:13:22,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@163cfad6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:13:22,491 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f952caa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:13:22,491 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:13:22,491 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6eb1b261{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:13:22,492 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4debea22{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/hadoop.log.dir/,STOPPED} 2024-12-06T08:13:22,495 WARN [BP-2060022616-172.17.0.2-1733472687841 heartbeating to localhost/127.0.0.1:39643 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:13:22,495 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:13:22,495 WARN [BP-2060022616-172.17.0.2-1733472687841 heartbeating to localhost/127.0.0.1:39643 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2060022616-172.17.0.2-1733472687841 (Datanode Uuid 08f5db71-ca13-4e86-aca8-d2947505f0b7) service to localhost/127.0.0.1:39643 2024-12-06T08:13:22,495 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:13:22,496 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/cluster_4ec09d49-f84c-fb79-2013-08b901b9695b/dfs/data/data3/current/BP-2060022616-172.17.0.2-1733472687841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:13:22,496 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/cluster_4ec09d49-f84c-fb79-2013-08b901b9695b/dfs/data/data4/current/BP-2060022616-172.17.0.2-1733472687841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:13:22,497 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:13:22,500 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6aad8790{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:13:22,500 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@587d1dca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:13:22,500 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:13:22,500 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b4ce9e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:13:22,501 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2276bd44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/hadoop.log.dir/,STOPPED} 2024-12-06T08:13:22,503 WARN [BP-2060022616-172.17.0.2-1733472687841 heartbeating to localhost/127.0.0.1:39643 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:13:22,503 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:13:22,503 WARN [BP-2060022616-172.17.0.2-1733472687841 heartbeating to localhost/127.0.0.1:39643 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2060022616-172.17.0.2-1733472687841 (Datanode Uuid 8d79b965-5f09-408d-875f-9be18fd8760a) service to localhost/127.0.0.1:39643 2024-12-06T08:13:22,503 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:13:22,503 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/cluster_4ec09d49-f84c-fb79-2013-08b901b9695b/dfs/data/data1/current/BP-2060022616-172.17.0.2-1733472687841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:13:22,504 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/cluster_4ec09d49-f84c-fb79-2013-08b901b9695b/dfs/data/data2/current/BP-2060022616-172.17.0.2-1733472687841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:13:22,504 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:13:22,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5682c4d1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:13:22,517 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ff1a6c1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:13:22,517 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:13:22,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74468826{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:13:22,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@88aab13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/hadoop.log.dir/,STOPPED} 2024-12-06T08:13:22,526 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-06T08:13:22,565 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-06T08:13:22,567 INFO [regionserver/b6b797fc3981:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:13:22,573 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=62 (was 12) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:39643 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39643 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/b6b797fc3981:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to 
localhost/127.0.0.1:39643 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/b6b797fc3981:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39643 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39643 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: RS-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/b6b797fc3981:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39643 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39643 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@2674f685 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:39643 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:39643 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: master/b6b797fc3981:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=401 (was 286) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=176 (was 262), ProcessCount=11 (was 11), AvailableMemoryMB=8496 (was 9032) 2024-12-06T08:13:22,581 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=62, OpenFileDescriptor=401, MaxFileDescriptor=1048576, SystemLoadAverage=176, ProcessCount=11, AvailableMemoryMB=8496 2024-12-06T08:13:22,581 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T08:13:22,581 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/hadoop.log.dir so I do NOT create it in target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4 2024-12-06T08:13:22,581 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b7c7a0ae-6618-204e-8d09-2d10fb682aa0/hadoop.tmp.dir so I do NOT create it in target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4 2024-12-06T08:13:22,581 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985, deleteOnExit=true 2024-12-06T08:13:22,581 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T08:13:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting 
test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/test.cache.data in system properties and HBase conf 2024-12-06T08:13:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T08:13:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir in system properties and HBase conf 2024-12-06T08:13:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T08:13:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T08:13:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T08:13:22,582 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-06T08:13:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:13:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:13:22,582 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T08:13:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:13:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T08:13:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T08:13:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:13:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:13:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T08:13:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/nfs.dump.dir in system properties and HBase conf 2024-12-06T08:13:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/java.io.tmpdir in system properties and HBase conf 2024-12-06T08:13:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:13:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T08:13:22,583 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T08:13:22,598 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T08:13:22,671 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:13:22,677 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:13:22,678 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:13:22,678 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:13:22,678 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:13:22,679 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:13:22,679 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6be9adb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:13:22,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@411ed8c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:13:22,798 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b038f36{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/java.io.tmpdir/jetty-localhost-40837-hadoop-hdfs-3_4_1-tests_jar-_-any-3521785143036168067/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:13:22,799 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34202dec{HTTP/1.1, (http/1.1)}{localhost:40837} 2024-12-06T08:13:22,799 INFO [Time-limited test {}] server.Server(415): Started @117182ms 2024-12-06T08:13:22,813 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T08:13:22,883 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:13:22,888 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:13:22,889 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:13:22,889 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:13:22,889 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:13:22,889 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d56ba76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:13:22,890 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c5873de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:13:23,012 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3cd4cdc4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/java.io.tmpdir/jetty-localhost-35521-hadoop-hdfs-3_4_1-tests_jar-_-any-5796742303734189787/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:13:23,012 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b20cc56{HTTP/1.1, (http/1.1)}{localhost:35521} 2024-12-06T08:13:23,012 INFO [Time-limited test {}] server.Server(415): Started @117395ms 2024-12-06T08:13:23,014 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:13:23,050 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:13:23,054 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:13:23,056 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:13:23,056 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:13:23,056 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:13:23,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@506c639{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:13:23,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a9cfc80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:13:23,108 WARN [Thread-449 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data1/current/BP-1291778185-172.17.0.2-1733472802617/current, will proceed with Du for space computation calculation, 2024-12-06T08:13:23,108 WARN [Thread-450 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data2/current/BP-1291778185-172.17.0.2-1733472802617/current, will proceed with Du for space computation calculation, 2024-12-06T08:13:23,133 WARN [Thread-428 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T08:13:23,137 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae1bbacf88144386 with lease ID 0x69c4730966cabc8a: Processing first storage report for DS-81e65821-ebb2-493a-b31b-32904614e919 from datanode DatanodeRegistration(127.0.0.1:39283, datanodeUuid=4a5eb678-d5ca-459f-adcb-17147397f402, infoPort=44995, infoSecurePort=0, ipcPort=42185, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617) 2024-12-06T08:13:23,137 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae1bbacf88144386 with lease ID 0x69c4730966cabc8a: from storage DS-81e65821-ebb2-493a-b31b-32904614e919 node DatanodeRegistration(127.0.0.1:39283, datanodeUuid=4a5eb678-d5ca-459f-adcb-17147397f402, infoPort=44995, infoSecurePort=0, ipcPort=42185, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T08:13:23,137 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae1bbacf88144386 with lease ID 0x69c4730966cabc8a: Processing first storage report for DS-01c24fdc-0b51-4184-a096-e9359d37bce8 from datanode DatanodeRegistration(127.0.0.1:39283, datanodeUuid=4a5eb678-d5ca-459f-adcb-17147397f402, infoPort=44995, infoSecurePort=0, ipcPort=42185, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617) 2024-12-06T08:13:23,137 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae1bbacf88144386 with lease ID 0x69c4730966cabc8a: from storage DS-01c24fdc-0b51-4184-a096-e9359d37bce8 node DatanodeRegistration(127.0.0.1:39283, datanodeUuid=4a5eb678-d5ca-459f-adcb-17147397f402, infoPort=44995, infoSecurePort=0, ipcPort=42185, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:13:23,175 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6a9600f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/java.io.tmpdir/jetty-localhost-36003-hadoop-hdfs-3_4_1-tests_jar-_-any-17935687404884339345/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:13:23,176 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2436c428{HTTP/1.1, (http/1.1)}{localhost:36003} 2024-12-06T08:13:23,176 INFO [Time-limited test {}] server.Server(415): Started @117559ms 2024-12-06T08:13:23,178 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
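[Annotation, not part of the captured log] For orientation, below is a minimal, hypothetical sketch of how a test such as TestLogRolling could bring up the mini cluster whose startup is recorded above. It assumes the HBase 2.x testing API (HBaseTestingUtility and StartMiniClusterOption from the hbase-server test artifacts) and simply mirrors the option values printed in the log (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, createRootDir=false, createWALDir=false); the real test's setup code may differ.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Mirror the logged StartMiniClusterOption: 1 master, 1 region server,
    // 2 HDFS data nodes, 1 ZooKeeper server, no pre-created root/WAL dirs.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .createRootDir(false)
        .createWALDir(false)
        .build();
    // Starts mini DFS, mini ZooKeeper, an HMaster and a region server,
    // i.e. the components whose bring-up appears in the log entries above.
    util.startMiniCluster(option);
    try {
      // The test body would exercise the cluster via util.getConnection() here.
    } finally {
      // Releases the threads and file descriptors that ResourceChecker
      // accounts for in the before/after lines at the top of this section.
      util.shutdownMiniCluster();
    }
  }
}

The ResourceChecker "before:"/"after:" entries and the "Potentially hanging thread" dump earlier in this section are the accounting taken around exactly such a start/shutdown cycle; leaked threads or file descriptors between the two snapshots are what get reported there.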
2024-12-06T08:13:23,273 WARN [Thread-476 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data4/current/BP-1291778185-172.17.0.2-1733472802617/current, will proceed with Du for space computation calculation, 2024-12-06T08:13:23,273 WARN [Thread-475 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data3/current/BP-1291778185-172.17.0.2-1733472802617/current, will proceed with Du for space computation calculation, 2024-12-06T08:13:23,301 WARN [Thread-464 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:13:23,304 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x65993505b07fdfc with lease ID 0x69c4730966cabc8b: Processing first storage report for DS-047cb55e-8b7a-4fac-961c-cf22fe581033 from datanode DatanodeRegistration(127.0.0.1:44291, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=44749, infoSecurePort=0, ipcPort=34419, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617) 2024-12-06T08:13:23,304 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x65993505b07fdfc with lease ID 0x69c4730966cabc8b: from storage DS-047cb55e-8b7a-4fac-961c-cf22fe581033 node DatanodeRegistration(127.0.0.1:44291, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=44749, infoSecurePort=0, ipcPort=34419, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:13:23,304 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x65993505b07fdfc with lease ID 0x69c4730966cabc8b: Processing first storage report for DS-125022d5-a1c0-4e9a-ae2d-473fc423cbd9 from datanode DatanodeRegistration(127.0.0.1:44291, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=44749, infoSecurePort=0, ipcPort=34419, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617) 2024-12-06T08:13:23,304 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x65993505b07fdfc with lease ID 0x69c4730966cabc8b: from storage DS-125022d5-a1c0-4e9a-ae2d-473fc423cbd9 node DatanodeRegistration(127.0.0.1:44291, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=44749, infoSecurePort=0, ipcPort=34419, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:13:23,309 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4 2024-12-06T08:13:23,314 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/zookeeper_0, clientPort=55687, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T08:13:23,315 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=55687 2024-12-06T08:13:23,315 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:13:23,317 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:13:23,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44291 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:13:23,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39283 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:13:23,336 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d with version=8 2024-12-06T08:13:23,336 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/hbase-staging 2024-12-06T08:13:23,338 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:13:23,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:13:23,339 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:13:23,339 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:13:23,339 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:13:23,339 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:13:23,339 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:13:23,339 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:13:23,340 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:37831 2024-12-06T08:13:23,340 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:13:23,342 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:13:23,345 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:37831 connecting to ZooKeeper ensemble=127.0.0.1:55687 2024-12-06T08:13:23,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:378310x0, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:13:23,356 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37831-0x100666392d80000 connected 2024-12-06T08:13:23,370 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:13:23,371 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:13:23,371 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:13:23,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37831 2024-12-06T08:13:23,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37831 2024-12-06T08:13:23,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37831 2024-12-06T08:13:23,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37831 2024-12-06T08:13:23,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37831 2024-12-06T08:13:23,374 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d, hbase.cluster.distributed=false 2024-12-06T08:13:23,393 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:13:23,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:13:23,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:13:23,393 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:13:23,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:13:23,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:13:23,393 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:13:23,393 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:13:23,394 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44681 2024-12-06T08:13:23,394 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:13:23,396 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:13:23,396 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:13:23,399 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:13:23,402 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44681 connecting to ZooKeeper ensemble=127.0.0.1:55687 2024-12-06T08:13:23,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446810x0, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:13:23,406 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44681-0x100666392d80001 connected 2024-12-06T08:13:23,406 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:13:23,407 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:13:23,407 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:13:23,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44681 2024-12-06T08:13:23,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44681 2024-12-06T08:13:23,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44681 2024-12-06T08:13:23,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 
with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44681 2024-12-06T08:13:23,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44681 2024-12-06T08:13:23,424 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b6b797fc3981,37831,1733472803338 2024-12-06T08:13:23,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:13:23,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:13:23,427 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b6b797fc3981,37831,1733472803338 2024-12-06T08:13:23,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:13:23,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:23,429 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:13:23,430 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b6b797fc3981,37831,1733472803338 from backup master directory 2024-12-06T08:13:23,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b6b797fc3981,37831,1733472803338 2024-12-06T08:13:23,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:13:23,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:13:23,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:23,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:13:23,432 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): 
master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:13:23,432 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:13:23,432 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b6b797fc3981,37831,1733472803338 2024-12-06T08:13:23,441 DEBUG [M:0;b6b797fc3981:37831 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b6b797fc3981:37831 2024-12-06T08:13:23,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44291 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:13:23,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39283 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:13:23,447 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/hbase.id with ID: ad1a3834-e067-419c-8d82-5229bfec8400 2024-12-06T08:13:23,462 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:13:23,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:23,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:23,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44291 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:13:23,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39283 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:13:23,479 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
{NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:13:23,480 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T08:13:23,482 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:13:23,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44291 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:13:23,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39283 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:13:23,498 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store 2024-12-06T08:13:23,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44291 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:13:23,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39283 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:13:23,909 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:13:23,909 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:13:23,909 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:13:23,909 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:13:23,909 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:13:23,909 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:13:23,909 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:13:23,909 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:13:23,910 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/.initializing 2024-12-06T08:13:23,911 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338 2024-12-06T08:13:23,914 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C37831%2C1733472803338, suffix=, logDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338, archiveDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/oldWALs, maxLogs=10 2024-12-06T08:13:23,915 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C37831%2C1733472803338.1733472803914 2024-12-06T08:13:23,925 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338/b6b797fc3981%2C37831%2C1733472803338.1733472803914 2024-12-06T08:13:23,925 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44749:44749),(127.0.0.1/127.0.0.1:44995:44995)] 2024-12-06T08:13:23,925 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:13:23,925 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:13:23,926 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:13:23,926 DEBUG [master/b6b797fc3981:0:becomeActiveMaster 
{}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:13:23,929 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:13:23,931 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T08:13:23,931 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:23,932 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:13:23,932 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:13:23,934 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T08:13:23,934 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:23,934 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:13:23,935 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:13:23,936 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T08:13:23,936 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:23,938 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:13:23,938 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:13:23,940 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T08:13:23,941 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:23,941 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:13:23,942 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:13:23,943 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:13:23,945 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T08:13:23,946 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:13:23,949 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:13:23,949 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=874910, jitterRate=0.11250682175159454}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:13:23,951 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:13:23,951 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T08:13:23,955 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26245188, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:13:23,956 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-06T08:13:23,956 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T08:13:23,956 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T08:13:23,957 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
2024-12-06T08:13:23,957 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T08:13:23,957 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-06T08:13:23,957 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T08:13:23,960 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T08:13:23,961 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T08:13:23,962 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T08:13:23,962 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T08:13:23,963 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T08:13:23,964 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T08:13:23,964 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T08:13:23,965 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T08:13:23,966 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T08:13:23,967 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T08:13:23,969 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T08:13:23,970 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T08:13:23,971 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T08:13:23,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-06T08:13:23,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:13:23,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:23,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:23,974 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b6b797fc3981,37831,1733472803338, sessionid=0x100666392d80000, setting cluster-up flag (Was=false) 2024-12-06T08:13:23,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:23,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:23,981 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T08:13:23,983 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,37831,1733472803338 2024-12-06T08:13:23,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:23,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:23,990 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T08:13:23,992 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,37831,1733472803338 2024-12-06T08:13:23,995 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T08:13:23,995 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T08:13:23,996 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, 
RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T08:13:23,996 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b6b797fc3981,37831,1733472803338 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T08:13:23,996 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:13:23,996 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:13:23,997 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:13:23,997 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:13:23,997 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b6b797fc3981:0, corePoolSize=10, maxPoolSize=10 2024-12-06T08:13:23,997 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:23,997 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:13:23,997 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:23,999 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:13:23,999 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T08:13:24,000 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733472834000 2024-12-06T08:13:24,000 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T08:13:24,000 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T08:13:24,000 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T08:13:24,001 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T08:13:24,001 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T08:13:24,001 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T08:13:24,001 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:24,001 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:13:24,001 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-06T08:13:24,002 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T08:13:24,002 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T08:13:24,002 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T08:13:24,002 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T08:13:24,002 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T08:13:24,003 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472804003,5,FailOnTimeoutGroup] 2024-12-06T08:13:24,003 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472804003,5,FailOnTimeoutGroup] 2024-12-06T08:13:24,003 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,003 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T08:13:24,004 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,004 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-06T08:13:24,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39283 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:13:24,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44291 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:13:24,012 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T08:13:24,012 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d 2024-12-06T08:13:24,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39283 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:13:24,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44291 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:13:24,026 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:13:24,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:13:24,030 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:13:24,030 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:24,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:13:24,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:13:24,033 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:13:24,033 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:24,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:13:24,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:13:24,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:13:24,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:24,041 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:13:24,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/meta/1588230740 2024-12-06T08:13:24,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/meta/1588230740 2024-12-06T08:13:24,044 DEBUG [RS:0;b6b797fc3981:44681 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b6b797fc3981:44681 2024-12-06T08:13:24,044 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:13:24,045 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1008): ClusterId : ad1a3834-e067-419c-8d82-5229bfec8400 2024-12-06T08:13:24,045 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:13:24,046 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:13:24,048 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:13:24,048 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:13:24,049 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:13:24,050 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882705, jitterRate=0.1224178820848465}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:13:24,051 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:13:24,051 DEBUG [RS:0;b6b797fc3981:44681 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7211d90e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:13:24,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:13:24,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:13:24,052 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:13:24,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:13:24,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:13:24,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for 
region hbase:meta,,1.1588230740 2024-12-06T08:13:24,052 DEBUG [RS:0;b6b797fc3981:44681 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f6b8a2d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:13:24,052 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T08:13:24,052 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T08:13:24,052 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T08:13:24,053 DEBUG [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-06T08:13:24,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:13:24,054 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(3073): reportForDuty to master=b6b797fc3981,37831,1733472803338 with isa=b6b797fc3981/172.17.0.2:44681, startcode=1733472803392 2024-12-06T08:13:24,054 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:13:24,054 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T08:13:24,054 DEBUG [RS:0;b6b797fc3981:44681 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:13:24,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T08:13:24,056 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T08:13:24,058 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T08:13:24,058 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60289, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:13:24,059 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37831 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b6b797fc3981,44681,1733472803392 2024-12-06T08:13:24,059 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37831 {}] master.ServerManager(486): Registering regionserver=b6b797fc3981,44681,1733472803392 2024-12-06T08:13:24,061 DEBUG [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d 2024-12-06T08:13:24,061 DEBUG [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:44775 2024-12-06T08:13:24,061 DEBUG [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1725): 
Config from master: hbase.master.info.port=-1 2024-12-06T08:13:24,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:13:24,064 DEBUG [RS:0;b6b797fc3981:44681 {}] zookeeper.ZKUtil(111): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b6b797fc3981,44681,1733472803392 2024-12-06T08:13:24,065 WARN [RS:0;b6b797fc3981:44681 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:13:24,065 INFO [RS:0;b6b797fc3981:44681 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:13:24,065 DEBUG [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392 2024-12-06T08:13:24,065 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b6b797fc3981,44681,1733472803392] 2024-12-06T08:13:24,068 DEBUG [RS:0;b6b797fc3981:44681 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T08:13:24,069 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:13:24,072 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:13:24,072 INFO [RS:0;b6b797fc3981:44681 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:13:24,072 INFO [RS:0;b6b797fc3981:44681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,073 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T08:13:24,074 INFO [RS:0;b6b797fc3981:44681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T08:13:24,074 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:24,075 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:24,075 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:24,075 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:24,075 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:24,075 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:13:24,075 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:24,075 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:24,075 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:24,075 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:24,076 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:24,076 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:13:24,076 DEBUG [RS:0;b6b797fc3981:44681 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:13:24,080 INFO [RS:0;b6b797fc3981:44681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,080 INFO [RS:0;b6b797fc3981:44681 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,080 INFO [RS:0;b6b797fc3981:44681 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,080 INFO [RS:0;b6b797fc3981:44681 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,080 INFO [RS:0;b6b797fc3981:44681 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,44681,1733472803392-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-06T08:13:24,097 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:13:24,097 INFO [RS:0;b6b797fc3981:44681 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,44681,1733472803392-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,113 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.Replication(204): b6b797fc3981,44681,1733472803392 started 2024-12-06T08:13:24,113 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1767): Serving as b6b797fc3981,44681,1733472803392, RpcServer on b6b797fc3981/172.17.0.2:44681, sessionid=0x100666392d80001 2024-12-06T08:13:24,113 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:13:24,113 DEBUG [RS:0;b6b797fc3981:44681 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b6b797fc3981,44681,1733472803392 2024-12-06T08:13:24,113 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,44681,1733472803392' 2024-12-06T08:13:24,113 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:13:24,114 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:13:24,114 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:13:24,114 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:13:24,114 DEBUG [RS:0;b6b797fc3981:44681 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b6b797fc3981,44681,1733472803392 2024-12-06T08:13:24,114 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,44681,1733472803392' 2024-12-06T08:13:24,114 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:13:24,114 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:13:24,115 DEBUG [RS:0;b6b797fc3981:44681 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:13:24,115 INFO [RS:0;b6b797fc3981:44681 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:13:24,115 INFO [RS:0;b6b797fc3981:44681 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T08:13:24,208 WARN [b6b797fc3981:37831 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-06T08:13:24,218 INFO [RS:0;b6b797fc3981:44681 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C44681%2C1733472803392, suffix=, logDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392, archiveDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/oldWALs, maxLogs=32 2024-12-06T08:13:24,221 INFO [RS:0;b6b797fc3981:44681 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C44681%2C1733472803392.1733472804221 2024-12-06T08:13:24,229 INFO [RS:0;b6b797fc3981:44681 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 2024-12-06T08:13:24,229 DEBUG [RS:0;b6b797fc3981:44681 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44749:44749),(127.0.0.1/127.0.0.1:44995:44995)] 2024-12-06T08:13:24,459 DEBUG [b6b797fc3981:37831 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T08:13:24,459 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b6b797fc3981,44681,1733472803392 2024-12-06T08:13:24,461 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,44681,1733472803392, state=OPENING 2024-12-06T08:13:24,462 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T08:13:24,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:24,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:24,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b6b797fc3981,44681,1733472803392}] 2024-12-06T08:13:24,464 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:13:24,464 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:13:24,618 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,44681,1733472803392 2024-12-06T08:13:24,618 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T08:13:24,620 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34584, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T08:13:24,625 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T08:13:24,625 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating 
WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:13:24,627 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C44681%2C1733472803392.meta, suffix=.meta, logDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392, archiveDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/oldWALs, maxLogs=32 2024-12-06T08:13:24,629 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta 2024-12-06T08:13:24,636 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta 2024-12-06T08:13:24,636 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44995:44995),(127.0.0.1/127.0.0.1:44749:44749)] 2024-12-06T08:13:24,636 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:13:24,637 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T08:13:24,637 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T08:13:24,637 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-06T08:13:24,637 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T08:13:24,637 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:13:24,637 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T08:13:24,637 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T08:13:24,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:13:24,643 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:13:24,643 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:24,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:13:24,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:13:24,645 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:13:24,645 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:24,645 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:13:24,646 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:13:24,646 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:13:24,646 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:24,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:13:24,648 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/meta/1588230740 2024-12-06T08:13:24,649 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/meta/1588230740 2024-12-06T08:13:24,651 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
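The CompactionConfiguration dump repeated above for each column family (minCompactSize 128 MB, files [3,10), ratio 1.2, off-peak ratio 5.0) reflects the usual store-compaction tuning properties. A hedged sketch, assuming the standard property names and copying the values from the log (the class name is made up):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class CompactionTuningSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Store files at or below this size are always eligible for minor compaction (128 MB here).
      conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
      // Lower/upper bounds on how many files one minor compaction may select.
      conf.setInt("hbase.hstore.compaction.min", 3);
      conf.setInt("hbase.hstore.compaction.max", 10);
      // Selection ratios for normal and off-peak hours, matching 1.2 / 5.0 above.
      conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
      conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    }
  }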
2024-12-06T08:13:24,652 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:13:24,653 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805325, jitterRate=0.024024158716201782}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:13:24,654 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:13:24,656 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733472804617 2024-12-06T08:13:24,658 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T08:13:24,658 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T08:13:24,659 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,44681,1733472803392 2024-12-06T08:13:24,660 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,44681,1733472803392, state=OPEN 2024-12-06T08:13:24,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:13:24,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:13:24,665 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:13:24,665 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:13:24,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T08:13:24,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=b6b797fc3981,44681,1733472803392 in 201 msec 2024-12-06T08:13:24,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T08:13:24,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 615 msec 2024-12-06T08:13:24,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 680 msec 2024-12-06T08:13:24,675 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733472804675, completionTime=-1 2024-12-06T08:13:24,675 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T08:13:24,675 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T08:13:24,676 DEBUG [hconnection-0x5a3da064-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:13:24,677 INFO [RS-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34598, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:13:24,678 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T08:13:24,678 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733472864678 2024-12-06T08:13:24,678 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733472924678 2024-12-06T08:13:24,678 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-06T08:13:24,684 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,37831,1733472803338-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,684 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,37831,1733472803338-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,684 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,37831,1733472803338-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,684 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b6b797fc3981:37831, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,684 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:24,684 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
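The chore registrations above (BalancerChore and RegionNormalizerChore every 300000 ms, CatalogJanitor every 300000 ms, HbckChore every 3600000 ms) follow the corresponding master period settings. The following is only a hedged sketch of the properties believed to drive those periods; treat any key I have mis-remembered as an assumption, and the class name is made up.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class MasterChorePeriodsSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Balancer and normalizer run every 5 minutes in this log.
      conf.setInt("hbase.balancer.period", 300000);
      conf.setInt("hbase.normalizer.period", 300000);
      // CatalogJanitor scan interval (also 5 minutes above).
      conf.setInt("hbase.catalogjanitor.interval", 300000);
      // HbckChore interval (1 hour above).
      conf.setInt("hbase.master.hbck.chore.interval", 3600000);
    }
  }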
2024-12-06T08:13:24,684 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:13:24,686 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T08:13:24,686 DEBUG [master/b6b797fc3981:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T08:13:24,687 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:13:24,687 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:24,688 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:13:24,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44291 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:13:24,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39283 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:13:24,700 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7fad0d052eed1f21c2de95b566a7b754, NAME => 'hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d 2024-12-06T08:13:24,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44291 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:13:24,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39283 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:13:24,708 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:13:24,708 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 7fad0d052eed1f21c2de95b566a7b754, disabling compactions & flushes 2024-12-06T08:13:24,708 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. 2024-12-06T08:13:24,708 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. 2024-12-06T08:13:24,708 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. after waiting 0 ms 2024-12-06T08:13:24,709 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. 2024-12-06T08:13:24,709 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. 2024-12-06T08:13:24,709 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7fad0d052eed1f21c2de95b566a7b754: 2024-12-06T08:13:24,710 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:13:24,710 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733472804710"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733472804710"}]},"ts":"1733472804710"} 2024-12-06T08:13:24,713 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T08:13:24,714 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:13:24,715 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472804714"}]},"ts":"1733472804714"} 2024-12-06T08:13:24,717 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T08:13:24,721 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=7fad0d052eed1f21c2de95b566a7b754, ASSIGN}] 2024-12-06T08:13:24,722 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=7fad0d052eed1f21c2de95b566a7b754, ASSIGN 2024-12-06T08:13:24,723 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=7fad0d052eed1f21c2de95b566a7b754, ASSIGN; state=OFFLINE, location=b6b797fc3981,44681,1733472803392; forceNewPlan=false, retain=false 2024-12-06T08:13:24,874 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=7fad0d052eed1f21c2de95b566a7b754, regionState=OPENING, regionLocation=b6b797fc3981,44681,1733472803392 2024-12-06T08:13:24,877 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 7fad0d052eed1f21c2de95b566a7b754, server=b6b797fc3981,44681,1733472803392}] 2024-12-06T08:13:25,031 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,44681,1733472803392 2024-12-06T08:13:25,035 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. 2024-12-06T08:13:25,036 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 7fad0d052eed1f21c2de95b566a7b754, NAME => 'hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:13:25,036 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 7fad0d052eed1f21c2de95b566a7b754 2024-12-06T08:13:25,036 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:13:25,036 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 7fad0d052eed1f21c2de95b566a7b754 2024-12-06T08:13:25,037 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 7fad0d052eed1f21c2de95b566a7b754 2024-12-06T08:13:25,038 INFO [StoreOpener-7fad0d052eed1f21c2de95b566a7b754-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7fad0d052eed1f21c2de95b566a7b754 2024-12-06T08:13:25,040 INFO [StoreOpener-7fad0d052eed1f21c2de95b566a7b754-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7fad0d052eed1f21c2de95b566a7b754 columnFamilyName info 2024-12-06T08:13:25,040 DEBUG [StoreOpener-7fad0d052eed1f21c2de95b566a7b754-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:25,041 INFO [StoreOpener-7fad0d052eed1f21c2de95b566a7b754-1 {}] regionserver.HStore(327): Store=7fad0d052eed1f21c2de95b566a7b754/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:13:25,042 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/namespace/7fad0d052eed1f21c2de95b566a7b754 2024-12-06T08:13:25,042 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/namespace/7fad0d052eed1f21c2de95b566a7b754 2024-12-06T08:13:25,044 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 7fad0d052eed1f21c2de95b566a7b754 2024-12-06T08:13:25,047 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/namespace/7fad0d052eed1f21c2de95b566a7b754/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:13:25,048 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 7fad0d052eed1f21c2de95b566a7b754; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804547, jitterRate=0.02303515374660492}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:13:25,049 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 7fad0d052eed1f21c2de95b566a7b754: 2024-12-06T08:13:25,050 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754., pid=6, masterSystemTime=1733472805031 2024-12-06T08:13:25,053 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. 2024-12-06T08:13:25,053 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. 
2024-12-06T08:13:25,054 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=7fad0d052eed1f21c2de95b566a7b754, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,44681,1733472803392 2024-12-06T08:13:25,059 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T08:13:25,059 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 7fad0d052eed1f21c2de95b566a7b754, server=b6b797fc3981,44681,1733472803392 in 179 msec 2024-12-06T08:13:25,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T08:13:25,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=7fad0d052eed1f21c2de95b566a7b754, ASSIGN in 338 msec 2024-12-06T08:13:25,062 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:13:25,063 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472805063"}]},"ts":"1733472805063"} 2024-12-06T08:13:25,065 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T08:13:25,067 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:13:25,069 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 383 msec 2024-12-06T08:13:25,087 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T08:13:25,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:13:25,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:25,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:13:25,093 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T08:13:25,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:13:25,108 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 14 msec 2024-12-06T08:13:25,116 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T08:13:25,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:13:25,128 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 12 msec 2024-12-06T08:13:25,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T08:13:25,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T08:13:25,147 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.715sec 2024-12-06T08:13:25,147 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T08:13:25,147 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T08:13:25,148 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T08:13:25,148 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T08:13:25,148 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T08:13:25,148 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,37831,1733472803338-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:13:25,148 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,37831,1733472803338-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T08:13:25,150 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T08:13:25,150 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T08:13:25,150 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,37831,1733472803338-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
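The two CreateNamespaceProcedure runs above bootstrap the built-in 'default' and 'hbase' namespaces as part of master initialization; the same operation is exposed to clients through the Admin API. A minimal, hedged client-side sketch (the namespace name and class name here are placeholders, and the connection configuration is not this test's):

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.NamespaceDescriptor;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class CreateNamespaceSketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        // Client-side equivalent of the CreateNamespaceProcedure driven by the master above.
        admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
      }
    }
  }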
2024-12-06T08:13:25,227 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d72d7be to 127.0.0.1:55687 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1830e3b0 2024-12-06T08:13:25,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6165ab3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:13:25,233 DEBUG [hconnection-0x379959bd-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:13:25,235 INFO [RS-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34614, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:13:25,237 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b6b797fc3981,37831,1733472803338 2024-12-06T08:13:25,238 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:13:25,241 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-06T08:13:25,258 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:13:25,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:13:25,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:13:25,258 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:13:25,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:13:25,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:13:25,258 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:13:25,258 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:13:25,259 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44247 2024-12-06T08:13:25,260 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:13:25,260 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:13:25,261 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:13:25,263 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:13:25,266 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44247 connecting to ZooKeeper ensemble=127.0.0.1:55687 2024-12-06T08:13:25,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:442470x0, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:13:25,270 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:442470x0, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:13:25,270 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44247-0x100666392d80003 connected 2024-12-06T08:13:25,271 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:44247-0x100666392d80003, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-06T08:13:25,272 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44247-0x100666392d80003, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:13:25,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44247 2024-12-06T08:13:25,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44247 2024-12-06T08:13:25,280 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44247 2024-12-06T08:13:25,280 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44247 2024-12-06T08:13:25,280 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44247 2024-12-06T08:13:25,282 DEBUG [pool-282-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-06T08:13:25,294 DEBUG [RS:1;b6b797fc3981:44247 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b6b797fc3981:44247 2024-12-06T08:13:25,295 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(1008): ClusterId : ad1a3834-e067-419c-8d82-5229bfec8400 2024-12-06T08:13:25,295 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:13:25,297 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:13:25,297 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:13:25,299 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:13:25,300 DEBUG [RS:1;b6b797fc3981:44247 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c9c9b96, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:13:25,300 DEBUG [RS:1;b6b797fc3981:44247 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f05385c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:13:25,300 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T08:13:25,300 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T08:13:25,300 DEBUG [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-06T08:13:25,301 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(3073): reportForDuty to master=b6b797fc3981,37831,1733472803338 with isa=b6b797fc3981/172.17.0.2:44247, startcode=1733472805257 2024-12-06T08:13:25,301 DEBUG [RS:1;b6b797fc3981:44247 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:13:25,304 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53865, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:13:25,304 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37831 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b6b797fc3981,44247,1733472805257 2024-12-06T08:13:25,304 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37831 {}] master.ServerManager(486): Registering regionserver=b6b797fc3981,44247,1733472805257 2024-12-06T08:13:25,306 DEBUG [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d 2024-12-06T08:13:25,306 DEBUG [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:44775 2024-12-06T08:13:25,306 DEBUG [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T08:13:25,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:13:25,309 DEBUG [RS:1;b6b797fc3981:44247 {}] zookeeper.ZKUtil(111): regionserver:44247-0x100666392d80003, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b6b797fc3981,44247,1733472805257 2024-12-06T08:13:25,309 WARN [RS:1;b6b797fc3981:44247 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T08:13:25,309 INFO [RS:1;b6b797fc3981:44247 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:13:25,309 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b6b797fc3981,44247,1733472805257] 2024-12-06T08:13:25,309 DEBUG [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44247,1733472805257 2024-12-06T08:13:25,315 DEBUG [RS:1;b6b797fc3981:44247 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T08:13:25,315 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:13:25,318 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:13:25,318 INFO [RS:1;b6b797fc3981:44247 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:13:25,318 INFO [RS:1;b6b797fc3981:44247 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:25,319 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T08:13:25,319 INFO [RS:1;b6b797fc3981:44247 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:13:25,320 DEBUG [RS:1;b6b797fc3981:44247 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:13:25,322 INFO [RS:1;b6b797fc3981:44247 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:25,322 INFO [RS:1;b6b797fc3981:44247 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:25,322 INFO [RS:1;b6b797fc3981:44247 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:25,322 INFO [RS:1;b6b797fc3981:44247 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:25,322 INFO [RS:1;b6b797fc3981:44247 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,44247,1733472805257-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-06T08:13:25,338 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:13:25,339 INFO [RS:1;b6b797fc3981:44247 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,44247,1733472805257-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:13:25,354 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.Replication(204): b6b797fc3981,44247,1733472805257 started 2024-12-06T08:13:25,355 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(1767): Serving as b6b797fc3981,44247,1733472805257, RpcServer on b6b797fc3981/172.17.0.2:44247, sessionid=0x100666392d80003 2024-12-06T08:13:25,355 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3355): Started new server=Thread[RS:1;b6b797fc3981:44247,5,FailOnTimeoutGroup] 2024-12-06T08:13:25,355 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:13:25,355 DEBUG [RS:1;b6b797fc3981:44247 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b6b797fc3981,44247,1733472805257 2024-12-06T08:13:25,355 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,44247,1733472805257' 2024-12-06T08:13:25,355 INFO [Time-limited test {}] wal.TestLogRolling(191): Replication=2 2024-12-06T08:13:25,355 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:13:25,356 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:13:25,356 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:13:25,356 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:13:25,356 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:13:25,356 DEBUG [RS:1;b6b797fc3981:44247 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b6b797fc3981,44247,1733472805257 2024-12-06T08:13:25,356 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,44247,1733472805257' 2024-12-06T08:13:25,357 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:13:25,357 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:13:25,357 DEBUG [RS:1;b6b797fc3981:44247 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:13:25,358 INFO [RS:1;b6b797fc3981:44247 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:13:25,358 INFO [RS:1;b6b797fc3981:44247 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
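The RS:1 lines above show HBaseTestingUtility bringing up a second region server inside the mini cluster before the test continues ("Started new server=Thread[RS:1;...]", "Replication=2"). A hedged sketch of how a test typically does this with the public HBaseTestingUtility/MiniHBaseCluster API; this is not copied from the actual TestLogRolling source, and the class name is made up.

  import org.apache.hadoop.hbase.HBaseTestingUtility;

  public class MiniClusterSketch {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtility util = new HBaseTestingUtility();
      // Start a master plus one region server backed by a mini DFS cluster.
      util.startMiniCluster(1);
      // Add a second region server, as the RS:1 thread was added above.
      util.getMiniHBaseCluster().startRegionServer();
      // ... run test logic against util.getConnection() ...
      util.shutdownMiniCluster();
    }
  }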
2024-12-06T08:13:25,358 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36088, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:13:25,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37831 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T08:13:25,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37831 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-06T08:13:25,360 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37831 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:13:25,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37831 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-06T08:13:25,363 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:13:25,363 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:25,363 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37831 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 9 2024-12-06T08:13:25,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37831 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:13:25,365 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:13:25,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44291 is added to blk_1073741837_1013 (size=393) 2024-12-06T08:13:25,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39283 is added to blk_1073741837_1013 (size=393) 2024-12-06T08:13:25,375 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1fe245b8d8375ab9b801a570a1b2b5e8, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d 2024-12-06T08:13:25,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39283 is added to blk_1073741838_1014 (size=76) 2024-12-06T08:13:25,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44291 is added to blk_1073741838_1014 (size=76) 2024-12-06T08:13:25,383 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:13:25,383 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1681): Closing 1fe245b8d8375ab9b801a570a1b2b5e8, disabling compactions & flushes 2024-12-06T08:13:25,383 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:13:25,383 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:13:25,383 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. after waiting 0 ms 2024-12-06T08:13:25,383 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:13:25,383 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:13:25,383 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1fe245b8d8375ab9b801a570a1b2b5e8: 2024-12-06T08:13:25,385 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:13:25,385 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733472805385"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733472805385"}]},"ts":"1733472805385"} 2024-12-06T08:13:25,388 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
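The table created above ('TestLogRolling-testLogRollOnDatanodeDeath', a single 'info' family with VERSIONS=1 and a 64 KB block size) triggers the earlier TableDescriptorChecker warnings because the descriptor carries a very small MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192), presumably so the test flushes and rolls logs quickly. A hedged sketch of building a comparable descriptor with the client API, with values copied from the log; this is an illustration, not the test's actual code, and the class name is made up.

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateTestTableSketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        TableDescriptorBuilder table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
            // Deliberately tiny limits, matching the TableDescriptorChecker warnings above.
            .setMaxFileSize(786432L)
            .setMemStoreFlushSize(8192L)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .setBlocksize(64 * 1024)
                .build());
        admin.createTable(table.build());
      }
    }
  }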
2024-12-06T08:13:25,389 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:13:25,390 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472805389"}]},"ts":"1733472805389"} 2024-12-06T08:13:25,392 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-06T08:13:25,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1fe245b8d8375ab9b801a570a1b2b5e8, ASSIGN}] 2024-12-06T08:13:25,397 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1fe245b8d8375ab9b801a570a1b2b5e8, ASSIGN 2024-12-06T08:13:25,399 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1fe245b8d8375ab9b801a570a1b2b5e8, ASSIGN; state=OFFLINE, location=b6b797fc3981,44681,1733472803392; forceNewPlan=false, retain=false 2024-12-06T08:13:25,460 INFO [RS:1;b6b797fc3981:44247 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C44247%2C1733472805257, suffix=, logDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44247,1733472805257, archiveDir=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/oldWALs, maxLogs=32 2024-12-06T08:13:25,461 INFO [RS:1;b6b797fc3981:44247 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C44247%2C1733472805257.1733472805461 2024-12-06T08:13:25,471 INFO [RS:1;b6b797fc3981:44247 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44247,1733472805257/b6b797fc3981%2C44247%2C1733472805257.1733472805461 2024-12-06T08:13:25,471 DEBUG [RS:1;b6b797fc3981:44247 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44749:44749),(127.0.0.1/127.0.0.1:44995:44995)] 2024-12-06T08:13:25,550 INFO [b6b797fc3981:37831 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-06T08:13:25,550 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=1fe245b8d8375ab9b801a570a1b2b5e8, regionState=OPENING, regionLocation=b6b797fc3981,44681,1733472803392 2024-12-06T08:13:25,553 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 1fe245b8d8375ab9b801a570a1b2b5e8, server=b6b797fc3981,44681,1733472803392}] 2024-12-06T08:13:25,706 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,44681,1733472803392 2024-12-06T08:13:25,711 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:13:25,711 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 1fe245b8d8375ab9b801a570a1b2b5e8, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:13:25,712 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 1fe245b8d8375ab9b801a570a1b2b5e8 2024-12-06T08:13:25,712 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:13:25,712 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 1fe245b8d8375ab9b801a570a1b2b5e8 2024-12-06T08:13:25,712 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 1fe245b8d8375ab9b801a570a1b2b5e8 2024-12-06T08:13:25,714 INFO [StoreOpener-1fe245b8d8375ab9b801a570a1b2b5e8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1fe245b8d8375ab9b801a570a1b2b5e8 2024-12-06T08:13:25,715 INFO [StoreOpener-1fe245b8d8375ab9b801a570a1b2b5e8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1fe245b8d8375ab9b801a570a1b2b5e8 columnFamilyName info 2024-12-06T08:13:25,715 DEBUG [StoreOpener-1fe245b8d8375ab9b801a570a1b2b5e8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:13:25,716 INFO [StoreOpener-1fe245b8d8375ab9b801a570a1b2b5e8-1 {}] regionserver.HStore(327): Store=1fe245b8d8375ab9b801a570a1b2b5e8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:13:25,717 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8 2024-12-06T08:13:25,717 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8 2024-12-06T08:13:25,720 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 1fe245b8d8375ab9b801a570a1b2b5e8 2024-12-06T08:13:25,722 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:13:25,722 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 1fe245b8d8375ab9b801a570a1b2b5e8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=752725, jitterRate=-0.04286077618598938}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:13:25,723 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 1fe245b8d8375ab9b801a570a1b2b5e8: 2024-12-06T08:13:25,724 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8., pid=11, masterSystemTime=1733472805706 2024-12-06T08:13:25,727 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:13:25,727 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 
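The entries above trace CreateTableProcedure pid=9 through region instantiation, the hbase:meta updates, the ASSIGN subprocedures (pid=10/11), and the region open on b6b797fc3981,44681. A minimal client-side sketch of creating an equivalent single-family table follows; the table name and the 'info' family match the descriptor logged above, while the surrounding class and connection setup are illustrative rather than taken from the test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
          // One column family 'info', matching the store opened in the log above;
          // all other table attributes are left at their defaults.
          admin.createTable(TableDescriptorBuilder.newBuilder(name)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .build());
        }
      }
    }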
2024-12-06T08:13:25,728 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=1fe245b8d8375ab9b801a570a1b2b5e8, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,44681,1733472803392 2024-12-06T08:13:25,733 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-06T08:13:25,733 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 1fe245b8d8375ab9b801a570a1b2b5e8, server=b6b797fc3981,44681,1733472803392 in 177 msec 2024-12-06T08:13:25,735 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T08:13:25,735 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1fe245b8d8375ab9b801a570a1b2b5e8, ASSIGN in 337 msec 2024-12-06T08:13:25,736 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:13:25,736 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472805736"}]},"ts":"1733472805736"} 2024-12-06T08:13:25,738 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-06T08:13:25,741 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:13:25,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 381 msec 2024-12-06T08:13:26,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:13:26,691 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:13:27,203 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T08:13:27,205 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:13:27,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:13:30,069 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T08:13:30,070 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-06T08:13:30,071 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-06T08:13:31,274 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-06T08:13:31,275 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-06T08:13:31,276 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-06T08:13:35,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37831 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:13:35,366 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath, procId: 9 completed 2024-12-06T08:13:35,369 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-06T08:13:35,369 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:13:35,384 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:13:35,388 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:13:35,389 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:13:35,389 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:13:35,389 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:13:35,390 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b47a450{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:13:35,390 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b66ddd8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:13:35,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@555ad0cc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/java.io.tmpdir/jetty-localhost-42385-hadoop-hdfs-3_4_1-tests_jar-_-any-9173650655760119272/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:13:35,507 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15e1fc81{HTTP/1.1, (http/1.1)}{localhost:42385} 2024-12-06T08:13:35,507 INFO [Time-limited test {}] server.Server(415): Started @129890ms 2024-12-06T08:13:35,509 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:13:35,553 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:13:35,558 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:13:35,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:13:35,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:13:35,559 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:13:35,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@347096cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:13:35,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@548a9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:13:35,610 WARN [Thread-632 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data6/current/BP-1291778185-172.17.0.2-1733472802617/current, will proceed with Du for space computation calculation, 2024-12-06T08:13:35,610 WARN [Thread-631 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data5/current/BP-1291778185-172.17.0.2-1733472802617/current, will proceed with Du for space computation calculation, 2024-12-06T08:13:35,635 WARN [Thread-611 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T08:13:35,638 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9efd511221290f1e with lease ID 0x69c4730966cabc8c: Processing first storage report for DS-cd90c2ff-8646-4934-8668-a01a509adf05 from datanode DatanodeRegistration(127.0.0.1:32945, datanodeUuid=806043b0-da6a-43cb-a70f-24154aa17528, infoPort=43607, infoSecurePort=0, ipcPort=44463, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617) 2024-12-06T08:13:35,638 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9efd511221290f1e with lease ID 0x69c4730966cabc8c: from storage DS-cd90c2ff-8646-4934-8668-a01a509adf05 node DatanodeRegistration(127.0.0.1:32945, datanodeUuid=806043b0-da6a-43cb-a70f-24154aa17528, infoPort=43607, infoSecurePort=0, ipcPort=44463, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:13:35,638 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9efd511221290f1e with lease ID 0x69c4730966cabc8c: Processing first storage report for DS-9ea9b0bb-ea45-4ec7-ad5c-f1a791ffd983 from datanode DatanodeRegistration(127.0.0.1:32945, datanodeUuid=806043b0-da6a-43cb-a70f-24154aa17528, infoPort=43607, infoSecurePort=0, ipcPort=44463, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617) 2024-12-06T08:13:35,639 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9efd511221290f1e with lease ID 0x69c4730966cabc8c: from storage DS-9ea9b0bb-ea45-4ec7-ad5c-f1a791ffd983 node DatanodeRegistration(127.0.0.1:32945, datanodeUuid=806043b0-da6a-43cb-a70f-24154aa17528, infoPort=43607, infoSecurePort=0, ipcPort=44463, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:13:35,677 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@57fbd89{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/java.io.tmpdir/jetty-localhost-35427-hadoop-hdfs-3_4_1-tests_jar-_-any-4949184273318161519/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:13:35,678 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e9e9343{HTTP/1.1, (http/1.1)}{localhost:35427} 2024-12-06T08:13:35,678 INFO [Time-limited test {}] server.Server(415): Started @130061ms 2024-12-06T08:13:35,680 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:13:35,715 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:13:35,719 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:13:35,719 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:13:35,719 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:13:35,719 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:13:35,720 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f7f175c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:13:35,720 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cfe5350{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:13:35,782 WARN [Thread-666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data7/current/BP-1291778185-172.17.0.2-1733472802617/current, will proceed with Du for space computation calculation, 2024-12-06T08:13:35,782 WARN [Thread-667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data8/current/BP-1291778185-172.17.0.2-1733472802617/current, will proceed with Du for space computation calculation, 2024-12-06T08:13:35,802 WARN [Thread-646 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T08:13:35,806 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd2199a83e1691709 with lease ID 0x69c4730966cabc8d: Processing first storage report for DS-c3a7f7c8-749a-4de6-a734-5992f832b964 from datanode DatanodeRegistration(127.0.0.1:35169, datanodeUuid=8df28f4f-c606-48f4-8fa4-384c429e5df5, infoPort=41899, infoSecurePort=0, ipcPort=38003, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617) 2024-12-06T08:13:35,806 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd2199a83e1691709 with lease ID 0x69c4730966cabc8d: from storage DS-c3a7f7c8-749a-4de6-a734-5992f832b964 node DatanodeRegistration(127.0.0.1:35169, datanodeUuid=8df28f4f-c606-48f4-8fa4-384c429e5df5, infoPort=41899, infoSecurePort=0, ipcPort=38003, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:13:35,806 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd2199a83e1691709 with lease ID 0x69c4730966cabc8d: Processing first storage report for DS-69230bd5-1b24-4710-9db9-c0108634473c from datanode DatanodeRegistration(127.0.0.1:35169, datanodeUuid=8df28f4f-c606-48f4-8fa4-384c429e5df5, infoPort=41899, infoSecurePort=0, ipcPort=38003, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617) 2024-12-06T08:13:35,806 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd2199a83e1691709 with lease ID 0x69c4730966cabc8d: from storage DS-69230bd5-1b24-4710-9db9-c0108634473c node DatanodeRegistration(127.0.0.1:35169, datanodeUuid=8df28f4f-c606-48f4-8fa4-384c429e5df5, infoPort=41899, infoSecurePort=0, ipcPort=38003, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:13:35,840 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3635bd54{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/java.io.tmpdir/jetty-localhost-40493-hadoop-hdfs-3_4_1-tests_jar-_-any-15773453488304144718/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:13:35,840 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d483e31{HTTP/1.1, (http/1.1)}{localhost:40493} 2024-12-06T08:13:35,840 INFO [Time-limited test {}] server.Server(415): Started @130223ms 2024-12-06T08:13:35,842 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
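The Jetty and block-report entries above show the test bringing up additional datanodes (data5 through data10) on the shared mini DFS cluster before members of the original write pipeline are taken down. A minimal sketch of adding datanodes from test code is shown below, assuming an HBaseTestingUtility instance backs the running cluster; the field name TEST_UTIL and the count of two are assumptions for illustration.

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class AddDataNodesSketch {
      // Assumed to be the same utility instance that started the mini cluster.
      static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

      static void addDataNodes() throws Exception {
        MiniDFSCluster dfs = TEST_UTIL.getDFSCluster();
        // Start two extra datanodes with managed data directories and default racks.
        dfs.startDataNodes(TEST_UTIL.getConfiguration(), 2, true, null, null);
        dfs.waitActive();
      }
    }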
2024-12-06T08:13:35,948 WARN [Thread-692 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data9/current/BP-1291778185-172.17.0.2-1733472802617/current, will proceed with Du for space computation calculation, 2024-12-06T08:13:35,948 WARN [Thread-693 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data10/current/BP-1291778185-172.17.0.2-1733472802617/current, will proceed with Du for space computation calculation, 2024-12-06T08:13:35,966 WARN [Thread-681 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:13:35,969 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ba5431100112972 with lease ID 0x69c4730966cabc8e: Processing first storage report for DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e from datanode DatanodeRegistration(127.0.0.1:44359, datanodeUuid=2a740072-7170-41d4-80ae-eee6d432026c, infoPort=38299, infoSecurePort=0, ipcPort=36911, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617) 2024-12-06T08:13:35,969 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ba5431100112972 with lease ID 0x69c4730966cabc8e: from storage DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e node DatanodeRegistration(127.0.0.1:44359, datanodeUuid=2a740072-7170-41d4-80ae-eee6d432026c, infoPort=38299, infoSecurePort=0, ipcPort=36911, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:13:35,969 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ba5431100112972 with lease ID 0x69c4730966cabc8e: Processing first storage report for DS-b4311eb5-ff17-43d1-93ed-0ba87a3f70a8 from datanode DatanodeRegistration(127.0.0.1:44359, datanodeUuid=2a740072-7170-41d4-80ae-eee6d432026c, infoPort=38299, infoSecurePort=0, ipcPort=36911, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617) 2024-12-06T08:13:35,969 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ba5431100112972 with lease ID 0x69c4730966cabc8e: from storage DS-b4311eb5-ff17-43d1-93ed-0ba87a3f70a8 node DatanodeRegistration(127.0.0.1:44359, datanodeUuid=2a740072-7170-41d4-80ae-eee6d432026c, infoPort=38299, infoSecurePort=0, ipcPort=36911, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:13:35,975 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6a9600f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:13:35,975 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2436c428{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:13:35,975 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:13:35,975 INFO 
[Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a9cfc80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:13:35,976 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@506c639{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,STOPPED} 2024-12-06T08:13:35,973 WARN [ResponseProcessor for block BP-1291778185-172.17.0.2-1733472802617:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1291778185-172.17.0.2-1733472802617:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:35,973 WARN [ResponseProcessor for block BP-1291778185-172.17.0.2-1733472802617:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1291778185-172.17.0.2-1733472802617:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1291778185-172.17.0.2-1733472802617:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:35,978 WARN [DataStreamer for file /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338/b6b797fc3981%2C37831%2C1733472803338.1733472803914 block BP-1291778185-172.17.0.2-1733472802617:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK], DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK]) is bad. 2024-12-06T08:13:35,978 WARN [DataStreamer for file /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta block BP-1291778185-172.17.0.2-1733472802617:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK], DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK]) is bad. 
2024-12-06T08:13:35,973 WARN [ResponseProcessor for block BP-1291778185-172.17.0.2-1733472802617:blk_1073741839_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1291778185-172.17.0.2-1733472802617:blk_1073741839_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:35,973 WARN [ResponseProcessor for block BP-1291778185-172.17.0.2-1733472802617:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1291778185-172.17.0.2-1733472802617:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:35,979 WARN [DataStreamer for file /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 block BP-1291778185-172.17.0.2-1733472802617:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK], DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK]) is bad. 2024-12-06T08:13:35,979 WARN [DataStreamer for file /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44247,1733472805257/b6b797fc3981%2C44247%2C1733472805257.1733472805461 block BP-1291778185-172.17.0.2-1733472802617:blk_1073741839_1015 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741839_1015 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK], DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK]) is bad. 2024-12-06T08:13:35,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_412925412_22 at /127.0.0.1:37464 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37464 dst: /127.0.0.1:44291 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:35,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1898411860_22 at /127.0.0.1:48680 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:39283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48680 dst: /127.0.0.1:39283 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:13:35,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1898411860_22 at /127.0.0.1:37548 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:44291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37548 dst: /127.0.0.1:44291 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:35,982 WARN [BP-1291778185-172.17.0.2-1733472802617 heartbeating to localhost/127.0.0.1:44775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:13:35,978 WARN [PacketResponder: BP-1291778185-172.17.0.2-1733472802617:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44291] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:35,982 WARN [BP-1291778185-172.17.0.2-1733472802617 heartbeating to localhost/127.0.0.1:44775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1291778185-172.17.0.2-1733472802617 (Datanode Uuid 633c6123-6914-48b4-b517-93772ad00228) service to localhost/127.0.0.1:44775 2024-12-06T08:13:35,982 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T08:13:35,982 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:13:35,981 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_412925412_22 at /127.0.0.1:48608 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48608 dst: /127.0.0.1:39283 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:13:35,983 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:48636 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48636 dst: /127.0.0.1:39283 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:35,981 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:37502 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37502 dst: /127.0.0.1:44291 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49757 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:35,981 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:37492 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44291:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37492 dst: /127.0.0.1:44291 java.io.InterruptedIOException: Interrupted receiveBlock at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:35,981 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:48632 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48632 dst: /127.0.0.1:39283 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:13:35,984 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data3/current/BP-1291778185-172.17.0.2-1733472802617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:13:35,984 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data4/current/BP-1291778185-172.17.0.2-1733472802617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:13:35,984 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:13:35,985 WARN [DataStreamer for file /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338/b6b797fc3981%2C37831%2C1733472803338.1733472803914 block BP-1291778185-172.17.0.2-1733472802617:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:35,993 WARN [DataStreamer for file /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta block BP-1291778185-172.17.0.2-1733472802617:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:35,993 WARN [DataStreamer for file /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 block BP-1291778185-172.17.0.2-1733472802617:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:35,994 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3cd4cdc4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:13:35,994 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b20cc56{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:13:35,994 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:13:35,994 WARN [DataStreamer for file /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44247,1733472805257/b6b797fc3981%2C44247%2C1733472805257.1733472805461 block BP-1291778185-172.17.0.2-1733472802617:blk_1073741839_1015 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1015 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:35,994 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c5873de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:13:35,995 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d56ba76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,STOPPED} 2024-12-06T08:13:35,996 WARN [BP-1291778185-172.17.0.2-1733472802617 heartbeating to localhost/127.0.0.1:44775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:13:35,996 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T08:13:35,996 WARN [BP-1291778185-172.17.0.2-1733472802617 heartbeating to localhost/127.0.0.1:44775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1291778185-172.17.0.2-1733472802617 (Datanode Uuid 4a5eb678-d5ca-459f-adcb-17147397f402) service to localhost/127.0.0.1:44775 2024-12-06T08:13:35,996 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:13:35,997 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data1/current/BP-1291778185-172.17.0.2-1733472802617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:13:35,997 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data2/current/BP-1291778185-172.17.0.2-1733472802617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:13:35,997 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:13:36,001 WARN [RS:0;b6b797fc3981:44681.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=4, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:36,002 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C44681%2C1733472803392:(num 1733472804221) roll requested 2024-12-06T08:13:36,002 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C44681%2C1733472803392.1733472816002 2024-12-06T08:13:36,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44681 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:13:36,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44681 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34614 deadline: 1733472826001, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL 2024-12-06T08:13:36,012 WARN [regionserver/b6b797fc3981:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=4, requesting roll of WAL 2024-12-06T08:13:36,012 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 with entries=4, filesize=959 B; new WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472816002 2024-12-06T08:13:36,014 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38299:38299),(127.0.0.1/127.0.0.1:43607:43607)] 2024-12-06T08:13:36,014 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 is not closed yet, will try archiving it next time 2024-12-06T08:13:36,014 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:36,014 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-06T08:13:36,015 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-06T08:13:36,015 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-06T08:13:36,015 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 2024-12-06T08:13:36,018 WARN [IPC Server handler 3 on default port 44775 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 has not been closed. Lease recovery is in progress. RecoveryId = 1021 for block blk_1073741833_1009 2024-12-06T08:13:36,020 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 after 5ms 2024-12-06T08:13:36,779 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T08:13:36,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:13:36,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:13:36,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:13:36,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:13:40,021 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 after 4006ms 2024-12-06T08:13:48,073 INFO [Time-limited test {}] wal.TestLogRolling(243): log.getCurrentFileName(): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472816002 2024-12-06T08:13:48,074 WARN [ResponseProcessor for block BP-1291778185-172.17.0.2-1733472802617:blk_1073741840_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1291778185-172.17.0.2-1733472802617:blk_1073741840_1020 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
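The "Failed to recover lease, attempt=…" entries above show RecoverLeaseFSUtils polling the namenode until the old WAL writer's lease is released. A minimal sketch of the underlying call it reports using (LeaseRecoverable.recoverLease(), exposed by DistributedFileSystem); the namenode URI, path and back-off below are placeholders rather than values taken from this run:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Poll recoverLease() until the namenode reports the file closed.
      public static void recover(URI nameNode, Path oldWal) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(nameNode, new Configuration());
        while (!dfs.recoverLease(oldWal)) {  // false while "Lease recovery is in progress"
          Thread.sleep(4000L);               // placeholder back-off between attempts
        }
      }
    }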
2024-12-06T08:13:48,074 WARN [DataStreamer for file /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472816002 block BP-1291778185-172.17.0.2-1733472802617:blk_1073741840_1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK], DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]) is bad. 2024-12-06T08:13:48,075 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:59970 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:44359:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59970 dst: /127.0.0.1:44359 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:13:48,075 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:37160 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:32945:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37160 dst: /127.0.0.1:32945 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:48,076 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3635bd54{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:13:48,076 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d483e31{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:13:48,076 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:13:48,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cfe5350{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:13:48,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f7f175c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,STOPPED} 2024-12-06T08:13:48,080 WARN [BP-1291778185-172.17.0.2-1733472802617 heartbeating to localhost/127.0.0.1:44775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:13:48,080 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:13:48,080 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:13:48,080 WARN [BP-1291778185-172.17.0.2-1733472802617 heartbeating to localhost/127.0.0.1:44775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1291778185-172.17.0.2-1733472802617 (Datanode Uuid 2a740072-7170-41d4-80ae-eee6d432026c) service to localhost/127.0.0.1:44775 2024-12-06T08:13:48,081 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data9/current/BP-1291778185-172.17.0.2-1733472802617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:13:48,081 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data10/current/BP-1291778185-172.17.0.2-1733472802617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:13:48,082 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:13:48,084 WARN [sync.1 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]] 2024-12-06T08:13:48,084 WARN [sync.1 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]] 2024-12-06T08:13:48,084 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C44681%2C1733472803392:(num 1733472816002) roll requested 2024-12-06T08:13:48,085 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C44681%2C1733472803392.1733472828085 2024-12-06T08:13:48,088 WARN [Thread-713 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
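At this point sync.1 has found only one live replica in the WAL pipeline and asked the logRoller to roll; the roll to the …1733472828085 file completes a few entries below. For reference, a roll can also be requested through the public Admin API; a hedged sketch, with connection setup and the target server left as placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      // Asks the given regionserver to roll its WAL, the same action the
      // logRoller thread performs here in response to the low-replication warning.
      public static void rollFor(ServerName regionServer) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.rollWALWriter(regionServer);
        }
      }
    }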
2024-12-06T08:13:48,088 WARN [Thread-713 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK], DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]) is bad. 2024-12-06T08:13:48,088 WARN [Thread-713 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741841_1023 2024-12-06T08:13:48,091 WARN [Thread-713 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK] 2024-12-06T08:13:48,100 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472816002 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472828085 2024-12-06T08:13:48,100 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43607:43607),(127.0.0.1/127.0.0.1:41899:41899)] 2024-12-06T08:13:48,100 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 is not closed yet, will try archiving it next time 2024-12-06T08:13:48,101 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472816002 is not closed yet, will try archiving it next time 2024-12-06T08:13:48,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32945 is added to blk_1073741840_1022 (size=2431) 2024-12-06T08:13:48,503 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 is not closed yet, will try archiving it next time 2024-12-06T08:13:50,652 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4c1530ec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32945, datanodeUuid=806043b0-da6a-43cb-a70f-24154aa17528, infoPort=43607, infoSecurePort=0, ipcPort=44463, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741840_1022 to 127.0.0.1:44359 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:52,088 WARN [ResponseProcessor for block BP-1291778185-172.17.0.2-1733472802617:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1291778185-172.17.0.2-1733472802617:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:52,088 WARN [DataStreamer for file /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472828085 block BP-1291778185-172.17.0.2-1733472802617:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK], DatanodeInfoWithStorage[127.0.0.1:35169,DS-c3a7f7c8-749a-4de6-a734-5992f832b964,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]) is bad. 2024-12-06T08:13:52,089 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:39346 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:32945:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39346 dst: /127.0.0.1:32945 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:52,089 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:39668 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:35169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39668 dst: /127.0.0.1:35169 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
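The Premature EOF and ClosedChannelException pairs above, followed by the Jetty contexts stopping and "Ending block pool service", are the expected fallout of the test deliberately stopping another datanode, which is the scenario testLogRollOnDatanodeDeath exercises. A sketch of how such a test typically does this against the mini cluster; the index is a placeholder, the real test picks a node in the WAL pipeline:

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class StopDataNodeSketch {
      // Stopping a datanode tears down its BPServiceActor, command processor and
      // web context, producing the shutdown entries seen in this log.
      public static void killOne(MiniDFSCluster dfsCluster) {
        dfsCluster.stopDataNode(0);
      }
    }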
2024-12-06T08:13:52,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@555ad0cc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:13:52,093 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15e1fc81{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:13:52,093 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:13:52,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b66ddd8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:13:52,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b47a450{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,STOPPED} 2024-12-06T08:13:52,095 WARN [BP-1291778185-172.17.0.2-1733472802617 heartbeating to localhost/127.0.0.1:44775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:13:52,095 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T08:13:52,095 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:13:52,095 WARN [BP-1291778185-172.17.0.2-1733472802617 heartbeating to localhost/127.0.0.1:44775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1291778185-172.17.0.2-1733472802617 (Datanode Uuid 806043b0-da6a-43cb-a70f-24154aa17528) service to localhost/127.0.0.1:44775 2024-12-06T08:13:52,096 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data5/current/BP-1291778185-172.17.0.2-1733472802617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:13:52,096 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data6/current/BP-1291778185-172.17.0.2-1733472802617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:13:52,097 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:13:52,099 WARN [sync.4 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35169,DS-c3a7f7c8-749a-4de6-a734-5992f832b964,DISK]] 2024-12-06T08:13:52,099 WARN [sync.4 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35169,DS-c3a7f7c8-749a-4de6-a734-5992f832b964,DISK]] 2024-12-06T08:13:52,099 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C44681%2C1733472803392:(num 1733472828085) roll requested 2024-12-06T08:13:52,100 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C44681%2C1733472803392.1733472832100 2024-12-06T08:13:52,103 WARN [Thread-723 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:52,103 WARN [Thread-723 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK], DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]) is bad. 2024-12-06T08:13:52,103 WARN [Thread-723 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741843_1026 2024-12-06T08:13:52,104 WARN [Thread-723 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK] 2024-12-06T08:13:52,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44681 {}] regionserver.HRegion(8581): Flush requested on 1fe245b8d8375ab9b801a570a1b2b5e8 2024-12-06T08:13:52,104 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1fe245b8d8375ab9b801a570a1b2b5e8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:13:52,105 WARN [Thread-723 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:52,106 WARN [Thread-723 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK], DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK]) is bad. 2024-12-06T08:13:52,106 WARN [Thread-723 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741844_1027 2024-12-06T08:13:52,106 WARN [Thread-723 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK] 2024-12-06T08:13:52,108 WARN [Thread-723 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:52,108 WARN [Thread-723 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK], DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]) is bad. 
2024-12-06T08:13:52,108 WARN [Thread-723 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741845_1028 2024-12-06T08:13:52,109 WARN [Thread-723 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK] 2024-12-06T08:13:52,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8/.tmp/info/d013465fecd94fdc99c4eadfc8142ad8 is 1080, key is row0002/info:/1733472828083/Put/seqid=0 2024-12-06T08:13:52,125 WARN [Thread-723 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32945 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:52,125 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:39682 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data8]'}, localName='127.0.0.1:35169', datanodeUuid='8df28f4f-c606-48f4-8fa4-384c429e5df5', xmitsInProgress=0}:Exception transferring block BP-1291778185-172.17.0.2-1733472802617:blk_1073741846_1029 to mirror 127.0.0.1:32945 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:13:52,125 WARN [Thread-723 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35169,DS-c3a7f7c8-749a-4de6-a734-5992f832b964,DISK], DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]) is bad. 2024-12-06T08:13:52,126 WARN [Thread-723 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741846_1029 2024-12-06T08:13:52,126 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:39682 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-06T08:13:52,126 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:39682 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:35169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39682 dst: /127.0.0.1:35169 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
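The flush of region 1fe245b8d8375ab9b801a570a1b2b5e8 requested above keeps going despite the pipeline failures, writing its HFile under .tmp/info. A flush like this can also be forced explicitly; a hedged sketch via the Admin API, using the table name visible in the paths above and assuming an Admin obtained from an open Connection:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class FlushSketch {
      // Writes out the memstores of every region of the table as HFiles,
      // the same work MemStoreFlusher.0 is doing here.
      public static void flush(Admin admin) throws Exception {
        admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
      }
    }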
2024-12-06T08:13:52,126 WARN [Thread-723 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK] 2024-12-06T08:13:52,127 WARN [IPC Server handler 3 on default port 44775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T08:13:52,127 WARN [IPC Server handler 3 on default port 44775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T08:13:52,127 WARN [IPC Server handler 3 on default port 44775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T08:13:52,127 WARN [Thread-724 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:52,128 WARN [Thread-724 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK], DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]) is bad. 
2024-12-06T08:13:52,128 WARN [Thread-724 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741847_1030 2024-12-06T08:13:52,129 WARN [Thread-724 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK] 2024-12-06T08:13:52,130 WARN [Thread-724 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:52,130 WARN [Thread-724 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK], DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]) is bad. 2024-12-06T08:13:52,130 WARN [Thread-724 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741849_1032 2024-12-06T08:13:52,131 WARN [Thread-724 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK] 2024-12-06T08:13:52,132 WARN [Thread-724 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:13:52,132 WARN [Thread-724 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK], DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK]) is bad. 2024-12-06T08:13:52,132 WARN [Thread-724 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741850_1033 2024-12-06T08:13:52,133 WARN [Thread-724 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK] 2024-12-06T08:13:52,134 WARN [Thread-724 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:52,135 WARN [Thread-724 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK], DatanodeInfoWithStorage[127.0.0.1:35169,DS-c3a7f7c8-749a-4de6-a734-5992f832b964,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]) is bad. 
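Each "Error Recovery … datanode N is bad" / "Abandoning" / "Excluding datanode" cycle above is the DFSClient rebuilding a write pipeline after losing a node. How aggressively it asks for replacement datanodes is controlled by client-side settings; an illustrative sketch, where the values shown are examples and not the ones this test run used:

    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoverySketch {
      public static Configuration clientConf() {
        Configuration conf = new Configuration();
        // Ask for a replacement datanode on pipeline failure when one is available...
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // ...but keep writing with the surviving nodes instead of failing when none is.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }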
2024-12-06T08:13:52,135 WARN [Thread-724 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741851_1034 2024-12-06T08:13:52,135 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472828085 with entries=13, filesize=14.10 KB; new WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472832100 2024-12-06T08:13:52,135 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41899:41899)] 2024-12-06T08:13:52,135 WARN [Thread-724 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK] 2024-12-06T08:13:52,135 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 is not closed yet, will try archiving it next time 2024-12-06T08:13:52,135 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472828085 is not closed yet, will try archiving it next time 2024-12-06T08:13:52,136 WARN [IPC Server handler 4 on default port 44775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T08:13:52,136 WARN [IPC Server handler 4 on default port 44775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T08:13:52,136 WARN [IPC Server handler 4 on default port 44775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T08:13:52,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741842_1025 (size=14443) 2024-12-06T08:13:52,138 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 is not closed yet, will try archiving it next time 2024-12-06T08:13:52,142 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741852_1035 (size=10347) 2024-12-06T08:13:52,321 WARN [sync.2 {}] wal.FSHLog(750): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35169,DS-c3a7f7c8-749a-4de6-a734-5992f832b964,DISK]] 2024-12-06T08:13:52,321 WARN [sync.2 {}] wal.FSHLog(721): Requesting log roll because of low replication, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35169,DS-c3a7f7c8-749a-4de6-a734-5992f832b964,DISK]] 2024-12-06T08:13:52,321 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C44681%2C1733472803392:(num 1733472832100) roll requested 2024-12-06T08:13:52,322 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C44681%2C1733472803392.1733472832322 2024-12-06T08:13:52,325 WARN [Thread-732 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:52,326 WARN [Thread-732 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK], DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]) is bad. 2024-12-06T08:13:52,326 WARN [Thread-732 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741853_1036 2024-12-06T08:13:52,326 WARN [Thread-732 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK] 2024-12-06T08:13:52,328 WARN [Thread-732 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:52,328 WARN [Thread-732 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK], DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK]) is bad. 2024-12-06T08:13:52,328 WARN [Thread-732 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741854_1037 2024-12-06T08:13:52,328 WARN [Thread-732 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44291,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK] 2024-12-06T08:13:52,331 WARN [Thread-732 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44359 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:52,331 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:39716 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data8]'}, localName='127.0.0.1:35169', datanodeUuid='8df28f4f-c606-48f4-8fa4-384c429e5df5', xmitsInProgress=0}:Exception transferring block BP-1291778185-172.17.0.2-1733472802617:blk_1073741855_1038 to mirror 127.0.0.1:44359 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:52,331 WARN [Thread-732 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35169,DS-c3a7f7c8-749a-4de6-a734-5992f832b964,DISK], DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]) is bad. 2024-12-06T08:13:52,331 WARN [Thread-732 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741855_1038 2024-12-06T08:13:52,331 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:39716 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-06T08:13:52,331 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1636808527_22 at /127.0.0.1:39716 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:35169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39716 dst: /127.0.0.1:35169 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:52,332 WARN [Thread-732 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK] 2024-12-06T08:13:52,333 WARN [Thread-732 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:52,333 WARN [Thread-732 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK], DatanodeInfoWithStorage[127.0.0.1:35169,DS-c3a7f7c8-749a-4de6-a734-5992f832b964,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]) is bad. 2024-12-06T08:13:52,333 WARN [Thread-732 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741856_1039 2024-12-06T08:13:52,334 WARN [Thread-732 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK] 2024-12-06T08:13:52,335 WARN [IPC Server handler 1 on default port 44775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-06T08:13:52,335 WARN [IPC Server handler 1 on default port 44775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-06T08:13:52,335 WARN [IPC Server handler 1 on default port 44775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-06T08:13:52,342 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472832100 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472832322 2024-12-06T08:13:52,342 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41899:41899)] 2024-12-06T08:13:52,342 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 is not closed yet, will try archiving it next time 2024-12-06T08:13:52,342 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): 
hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472832100 is not closed yet, will try archiving it next time 2024-12-06T08:13:52,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741848_1031 (size=1261) 2024-12-06T08:13:52,523 WARN [sync.4 {}] wal.FSHLog(760): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-06T08:13:52,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8/.tmp/info/d013465fecd94fdc99c4eadfc8142ad8 2024-12-06T08:13:52,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8/.tmp/info/d013465fecd94fdc99c4eadfc8142ad8 as hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8/info/d013465fecd94fdc99c4eadfc8142ad8 2024-12-06T08:13:52,558 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8/info/d013465fecd94fdc99c4eadfc8142ad8, entries=5, sequenceid=12, filesize=10.1 K 2024-12-06T08:13:52,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 1fe245b8d8375ab9b801a570a1b2b5e8 in 455ms, sequenceid=12, compaction requested=false 2024-12-06T08:13:52,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1fe245b8d8375ab9b801a570a1b2b5e8: 2024-12-06T08:13:52,735 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:13:52,739 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:13:52,740 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:13:52,740 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:13:52,740 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:13:52,741 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@560500bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:13:52,741 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ca57f19{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:13:52,745 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(751): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 is not closed yet, will try archiving it next time 2024-12-06T08:13:52,745 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472816002 to hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/oldWALs/b6b797fc3981%2C44681%2C1733472803392.1733472816002 2024-12-06T08:13:52,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e7d75d3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/java.io.tmpdir/jetty-localhost-37395-hadoop-hdfs-3_4_1-tests_jar-_-any-16830534824772491295/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:13:52,858 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11623059{HTTP/1.1, (http/1.1)}{localhost:37395} 2024-12-06T08:13:52,858 INFO [Time-limited test {}] server.Server(415): Started @147241ms 2024-12-06T08:13:52,859 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:13:52,979 WARN [Thread-752 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T08:13:52,987 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccb6d8840693c474 with lease ID 0x69c4730966cabc8f: from storage DS-047cb55e-8b7a-4fac-961c-cf22fe581033 node DatanodeRegistration(127.0.0.1:39685, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=39645, infoSecurePort=0, ipcPort=32789, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:13:52,987 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xccb6d8840693c474 with lease ID 0x69c4730966cabc8f: from storage DS-125022d5-a1c0-4e9a-ae2d-473fc423cbd9 node DatanodeRegistration(127.0.0.1:39685, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=39645, infoSecurePort=0, ipcPort=32789, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:13:53,308 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:13:53,807 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@69b2bee9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35169, datanodeUuid=8df28f4f-c606-48f4-8fa4-384c429e5df5, infoPort=41899, infoSecurePort=0, ipcPort=38003, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741852_1035 to 127.0.0.1:39283 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:53,807 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@67755c77[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35169, datanodeUuid=8df28f4f-c606-48f4-8fa4-384c429e5df5, infoPort=41899, infoSecurePort=0, ipcPort=38003, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741842_1025 to 127.0.0.1:44359 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:54,000 WARN [master/b6b797fc3981:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=96, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:54,000 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C37831%2C1733472803338:(num 1733472803914) roll requested 2024-12-06T08:13:54,001 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C37831%2C1733472803338.1733472834001 2024-12-06T08:13:54,001 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:13:54,001 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:54,004 WARN [Thread-776 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:54,005 WARN [Thread-776 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK], DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]) is bad. 2024-12-06T08:13:54,005 WARN [Thread-776 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741858_1041 2024-12-06T08:13:54,005 WARN [Thread-776 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK] 2024-12-06T08:13:54,008 WARN [Thread-776 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44359 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:54,008 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_412925412_22 at /127.0.0.1:49838 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data8]'}, localName='127.0.0.1:35169', datanodeUuid='8df28f4f-c606-48f4-8fa4-384c429e5df5', xmitsInProgress=0}:Exception transferring block BP-1291778185-172.17.0.2-1733472802617:blk_1073741859_1042 to mirror 127.0.0.1:44359 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:13:54,008 WARN [Thread-776 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35169,DS-c3a7f7c8-749a-4de6-a734-5992f832b964,DISK], DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]) is bad. 2024-12-06T08:13:54,008 WARN [Thread-776 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741859_1042 2024-12-06T08:13:54,008 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_412925412_22 at /127.0.0.1:49838 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-06T08:13:54,008 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_412925412_22 at /127.0.0.1:49838 [Receiving block BP-1291778185-172.17.0.2-1733472802617:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:35169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49838 dst: /127.0.0.1:35169 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:13:54,009 WARN [Thread-776 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK] 2024-12-06T08:13:54,016 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=96, requesting roll of WAL 2024-12-06T08:13:54,016 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338/b6b797fc3981%2C37831%2C1733472803338.1733472803914 with entries=93, filesize=46.05 KB; new WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338/b6b797fc3981%2C37831%2C1733472803338.1733472834001 2024-12-06T08:13:54,017 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39645:39645),(127.0.0.1/127.0.0.1:41899:41899)] 2024-12-06T08:13:54,017 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338/b6b797fc3981%2C37831%2C1733472803338.1733472803914 is not closed yet, will try archiving it next time 2024-12-06T08:13:54,017 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:13:54,017 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:13:54,017 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338/b6b797fc3981%2C37831%2C1733472803338.1733472803914 2024-12-06T08:13:54,017 WARN [IPC Server handler 4 on default port 44775 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338/b6b797fc3981%2C37831%2C1733472803338.1733472803914 has not been closed. Lease recovery is in progress. RecoveryId = 1044 for block blk_1073741830_1006 2024-12-06T08:13:54,018 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338/b6b797fc3981%2C37831%2C1733472803338.1733472803914 after 1ms 2024-12-06T08:13:54,806 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@67755c77[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35169, datanodeUuid=8df28f4f-c606-48f4-8fa4-384c429e5df5, infoPort=41899, infoSecurePort=0, ipcPort=38003, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741848_1031 to 127.0.0.1:44359 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:13:55,052 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:13:55,053 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40852, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:13:56,300 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:13:56,302 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40858, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:13:58,019 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338/b6b797fc3981%2C37831%2C1733472803338.1733472803914 after 4002ms 2024-12-06T08:14:03,002 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@74a1cc72 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1291778185-172.17.0.2-1733472802617:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:39283,null,null]) java.net.ConnectException: Call From b6b797fc3981/172.17.0.2 to localhost:42185 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-06T08:14:03,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39685 is added to blk_1073741833_1021 (size=959) 2024-12-06T08:14:04,985 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6eb6b0c5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39685, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=39645, infoSecurePort=0, ipcPort=32789, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741835_1011 to 127.0.0.1:32945 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:14:04,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741837_1013 (size=393) 2024-12-06T08:14:05,983 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@451ffe00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39685, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=39645, infoSecurePort=0, ipcPort=32789, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741831_1007 to 127.0.0.1:32945 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:05,983 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6eb6b0c5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39685, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=39645, infoSecurePort=0, ipcPort=32789, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741829_1005 to 127.0.0.1:44359 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:06,072 INFO [master/b6b797fc3981:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T08:14:06,072 INFO [master/b6b797fc3981:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-06T08:14:07,983 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@451ffe00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39685, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=39645, infoSecurePort=0, ipcPort=32789, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741828_1004 to 127.0.0.1:32945 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:07,983 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6eb6b0c5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39685, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=39645, infoSecurePort=0, ipcPort=32789, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741832_1008 to 127.0.0.1:44359 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:08,984 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6eb6b0c5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39685, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=39645, infoSecurePort=0, ipcPort=32789, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741826_1002 to 127.0.0.1:44359 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:08,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741833_1021 (size=959) 2024-12-06T08:14:10,712 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1fe245b8d8375ab9b801a570a1b2b5e8, had cached 0 bytes from a total of 10347 2024-12-06T08:14:10,984 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@451ffe00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39685, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=39645, infoSecurePort=0, ipcPort=32789, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741827_1003 to 127.0.0.1:32945 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:10,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:14:11,729 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C44681%2C1733472803392.1733472851729 2024-12-06T08:14:11,732 WARN [Thread-791 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:14:11,733 WARN [Thread-791 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741861_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK], DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]) is bad. 2024-12-06T08:14:11,733 WARN [Thread-791 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741861_1045 2024-12-06T08:14:11,733 WARN [Thread-791 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK] 2024-12-06T08:14:11,740 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472832322 with entries=2, filesize=1.57 KB; new WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472851729 2024-12-06T08:14:11,740 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39645:39645),(127.0.0.1/127.0.0.1:41899:41899)] 2024-12-06T08:14:11,740 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472832322 is not closed yet, will try archiving it next time 2024-12-06T08:14:11,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741857_1040 (size=1618) 2024-12-06T08:14:11,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44681 {}] regionserver.HRegion(8581): Flush requested on 1fe245b8d8375ab9b801a570a1b2b5e8 2024-12-06T08:14:11,743 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1fe245b8d8375ab9b801a570a1b2b5e8 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-06T08:14:11,744 INFO [sync.3 {}] wal.FSHLog(777): LowReplication-Roller was enabled. 
2024-12-06T08:14:11,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8/.tmp/info/0a4febce06ad4256b6cfd343aa8906b3 is 1080, key is row0007/info:/1733472832105/Put/seqid=0 2024-12-06T08:14:11,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741863_1047 (size=13583) 2024-12-06T08:14:11,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39685 is added to blk_1073741863_1047 (size=13583) 2024-12-06T08:14:11,754 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=25 (bloomFilter=true), to=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8/.tmp/info/0a4febce06ad4256b6cfd343aa8906b3 2024-12-06T08:14:11,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T08:14:11,757 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T08:14:11,757 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d72d7be to 127.0.0.1:55687 2024-12-06T08:14:11,757 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:14:11,757 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T08:14:11,757 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1950158278, stopped=false 2024-12-06T08:14:11,757 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b6b797fc3981,37831,1733472803338 2024-12-06T08:14:11,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:14:11,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:11,760 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T08:14:11,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:14:11,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:11,760 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:14:11,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44247-0x100666392d80003, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:14:11,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44247-0x100666392d80003, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:11,760 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,44681,1733472803392' ***** 2024-12-06T08:14:11,760 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T08:14:11,760 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,44247,1733472805257' ***** 2024-12-06T08:14:11,761 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T08:14:11,761 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:14:11,761 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:14:11,761 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:14:11,761 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:14:11,761 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T08:14:11,761 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44247-0x100666392d80003, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:14:11,761 INFO [RS:1;b6b797fc3981:44247 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:14:11,762 INFO [RS:1;b6b797fc3981:44247 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T08:14:11,762 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,44247,1733472805257 2024-12-06T08:14:11,762 DEBUG [RS:1;b6b797fc3981:44247 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:14:11,762 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,44247,1733472805257; all regions closed. 2024-12-06T08:14:11,762 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44247,1733472805257 2024-12-06T08:14:11,763 WARN [WAL-Shutdown-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:14:11,763 ERROR [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(1664): Shutdown / close of WAL failed: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting...
2024-12-06T08:14:11,763 DEBUG [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(1665): Shutdown / close exception details: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-06T08:14:11,763 DEBUG [RS:1;b6b797fc3981:44247 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T08:14:11,763 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.LeaseManager(133): Closed leases
2024-12-06T08:14:11,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8/.tmp/info/0a4febce06ad4256b6cfd343aa8906b3 as hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8/info/0a4febce06ad4256b6cfd343aa8906b3
2024-12-06T08:14:11,763 INFO [RS:1;b6b797fc3981:44247 {}] hbase.ChoreService(370): Chore service for: regionserver/b6b797fc3981:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-06T08:14:11,764 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-06T08:14:11,764 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-06T08:14:11,764 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-06T08:14:11,764 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-06T08:14:11,764 INFO [RS:1;b6b797fc3981:44247 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44247 2024-12-06T08:14:11,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44247-0x100666392d80003, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b6b797fc3981,44247,1733472805257 2024-12-06T08:14:11,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:14:11,767 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b6b797fc3981,44247,1733472805257] 2024-12-06T08:14:11,767 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b6b797fc3981,44247,1733472805257; numProcessing=1 2024-12-06T08:14:11,768 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b6b797fc3981,44247,1733472805257 already deleted, retry=false 2024-12-06T08:14:11,768 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b6b797fc3981,44247,1733472805257 expired; onlineServers=1 2024-12-06T08:14:11,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8/info/0a4febce06ad4256b6cfd343aa8906b3, entries=8, sequenceid=25, filesize=13.3 K 2024-12-06T08:14:11,772 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~10.50 KB/10757, heapSize ~11.48 KB/11760, currentSize=9.46 KB/9684 for 1fe245b8d8375ab9b801a570a1b2b5e8 in 29ms, sequenceid=25, compaction requested=false 2024-12-06T08:14:11,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1fe245b8d8375ab9b801a570a1b2b5e8: 2024-12-06T08:14:11,772 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=23.4 K, sizeToCheck=16.0 K 2024-12-06T08:14:11,772 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T08:14:11,772 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1fe245b8d8375ab9b801a570a1b2b5e8/info/0a4febce06ad4256b6cfd343aa8906b3 because midkey is the same as first or last row 2024-12-06T08:14:11,772 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T08:14:11,772 INFO [RS:0;b6b797fc3981:44681 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:14:11,772 INFO [RS:0;b6b797fc3981:44681 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-06T08:14:11,772 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(3579): Received CLOSE for 7fad0d052eed1f21c2de95b566a7b754 2024-12-06T08:14:11,772 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(3579): Received CLOSE for 1fe245b8d8375ab9b801a570a1b2b5e8 2024-12-06T08:14:11,773 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,44681,1733472803392 2024-12-06T08:14:11,773 DEBUG [RS:0;b6b797fc3981:44681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:14:11,773 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:14:11,773 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:14:11,773 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T08:14:11,773 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 7fad0d052eed1f21c2de95b566a7b754, disabling compactions & flushes 2024-12-06T08:14:11,773 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T08:14:11,773 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. 2024-12-06T08:14:11,773 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. 2024-12-06T08:14:11,773 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. after waiting 0 ms 2024-12-06T08:14:11,773 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-06T08:14:11,773 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. 
2024-12-06T08:14:11,773 DEBUG [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1603): Online Regions={7fad0d052eed1f21c2de95b566a7b754=hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754., 1fe245b8d8375ab9b801a570a1b2b5e8=TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8., 1588230740=hbase:meta,,1.1588230740} 2024-12-06T08:14:11,773 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 7fad0d052eed1f21c2de95b566a7b754 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T08:14:11,773 DEBUG [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 1fe245b8d8375ab9b801a570a1b2b5e8, 7fad0d052eed1f21c2de95b566a7b754 2024-12-06T08:14:11,773 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:14:11,773 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:14:11,773 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:14:11,773 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:14:11,773 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:14:11,774 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.87 KB heapSize=5.40 KB 2024-12-06T08:14:11,774 WARN [RS_OPEN_META-regionserver/b6b797fc3981:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:14:11,774 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C44681%2C1733472803392.meta:.meta(num 1733472804629) roll requested
2024-12-06T08:14:11,774 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-12-06T08:14:11,774 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C44681%2C1733472803392.meta.1733472851774.meta
2024-12-06T08:14:11,775 ERROR [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2808): ***** ABORTING region server b6b797fc3981,44681,1733472803392: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?]
    at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?]
    at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-06T08:14:11,775 ERROR [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2815): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-06T08:14:11,778 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-06T08:14:11,779 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-06T08:14:11,779 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-06T08:14:11,779 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-06T08:14:11,779 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2819): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 293304288 }, "NonHeapMemoryUsage": { "committed": 162988032, "init": 7667712, "max": -1, "used": 161183312 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-12-06T08:14:11,782 WARN [Thread-805 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:11,782 WARN [Thread-805 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741864_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK], DatanodeInfoWithStorage[127.0.0.1:39685,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]) is bad. 
2024-12-06T08:14:11,782 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37831 {}] master.MasterRpcServices(626): b6b797fc3981,44681,1733472803392 reported a fatal error: ***** ABORTING region server b6b797fc3981,44681,1733472803392: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** Cause: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base/java.lang.Thread.run(Thread.java:840) Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) 2024-12-06T08:14:11,782 WARN [Thread-805 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741864_1048 2024-12-06T08:14:11,783 WARN [Thread-805 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK] 2024-12-06T08:14:11,784 WARN [Thread-805 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:11,785 WARN [Thread-805 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741865_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK], DatanodeInfoWithStorage[127.0.0.1:39685,DS-047cb55e-8b7a-4fac-961c-cf22fe581033,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]) is bad. 
2024-12-06T08:14:11,785 WARN [Thread-805 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741865_1049 2024-12-06T08:14:11,785 WARN [Thread-805 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK] 2024-12-06T08:14:11,799 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/namespace/7fad0d052eed1f21c2de95b566a7b754/.tmp/info/bfc59f0ae81c4f3ba4fcd9f83bd8ebb3 is 45, key is default/info:d/1733472805099/Put/seqid=0 2024-12-06T08:14:11,801 WARN [regionserver/b6b797fc3981:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-06T08:14:11,801 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta with entries=11, filesize=3.63 KB; new WAL /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472851774.meta 2024-12-06T08:14:11,801 WARN [Thread-806 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:11,801 WARN [Thread-806 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1291778185-172.17.0.2-1733472802617:blk_1073741867_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK], DatanodeInfoWithStorage[127.0.0.1:32945,DS-cd90c2ff-8646-4934-8668-a01a509adf05,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK]) is bad. 
2024-12-06T08:14:11,801 WARN [Thread-806 {}] hdfs.DataStreamer(1850): Abandoning BP-1291778185-172.17.0.2-1733472802617:blk_1073741867_1051 2024-12-06T08:14:11,801 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41899:41899),(127.0.0.1/127.0.0.1:39645:39645)] 2024-12-06T08:14:11,801 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta is not closed yet, will try archiving it next time 2024-12-06T08:14:11,801 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:11,801 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39283,DS-81e65821-ebb2-493a-b31b-32904614e919,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:11,801 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta 2024-12-06T08:14:11,802 WARN [Thread-806 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44359,DS-8327caf6-8cc0-4514-878b-7c03e8d6ae8e,DISK] 2024-12-06T08:14:11,802 WARN [IPC Server handler 2 on default port 44775 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1052 for block blk_1073741834_1010 2024-12-06T08:14:11,802 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta after 1ms 2024-12-06T08:14:11,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741868_1053 (size=5037) 2024-12-06T08:14:11,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39685 is added to blk_1073741868_1053 (size=5037) 2024-12-06T08:14:11,808 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/namespace/7fad0d052eed1f21c2de95b566a7b754/.tmp/info/bfc59f0ae81c4f3ba4fcd9f83bd8ebb3 2024-12-06T08:14:11,815 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/namespace/7fad0d052eed1f21c2de95b566a7b754/.tmp/info/bfc59f0ae81c4f3ba4fcd9f83bd8ebb3 as hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/namespace/7fad0d052eed1f21c2de95b566a7b754/info/bfc59f0ae81c4f3ba4fcd9f83bd8ebb3 2024-12-06T08:14:11,820 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/namespace/7fad0d052eed1f21c2de95b566a7b754/info/bfc59f0ae81c4f3ba4fcd9f83bd8ebb3, entries=2, sequenceid=6, filesize=4.9 K 2024-12-06T08:14:11,821 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 7fad0d052eed1f21c2de95b566a7b754 in 48ms, sequenceid=6, compaction requested=false 2024-12-06T08:14:11,826 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/data/hbase/namespace/7fad0d052eed1f21c2de95b566a7b754/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T08:14:11,826 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. 2024-12-06T08:14:11,826 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 7fad0d052eed1f21c2de95b566a7b754: 2024-12-06T08:14:11,827 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733472804684.7fad0d052eed1f21c2de95b566a7b754. 
2024-12-06T08:14:11,827 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 1fe245b8d8375ab9b801a570a1b2b5e8, disabling compactions & flushes 2024-12-06T08:14:11,827 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:14:11,827 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:14:11,827 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. after waiting 0 ms 2024-12-06T08:14:11,827 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:14:11,827 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 1fe245b8d8375ab9b801a570a1b2b5e8: 2024-12-06T08:14:11,827 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:14:11,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44247-0x100666392d80003, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:14:11,867 INFO [RS:1;b6b797fc3981:44247 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,44247,1733472805257; zookeeper connection closed. 
2024-12-06T08:14:11,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44247-0x100666392d80003, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:14:11,867 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3d80a327 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3d80a327 2024-12-06T08:14:11,973 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(3579): Received CLOSE for 1fe245b8d8375ab9b801a570a1b2b5e8 2024-12-06T08:14:11,973 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T08:14:11,974 DEBUG [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 1fe245b8d8375ab9b801a570a1b2b5e8 2024-12-06T08:14:11,974 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 1fe245b8d8375ab9b801a570a1b2b5e8, disabling compactions & flushes 2024-12-06T08:14:11,974 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:14:11,974 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:14:11,974 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:14:11,974 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:14:11,974 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:14:11,974 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:14:11,974 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. after waiting 0 ms 2024-12-06T08:14:11,974 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 
2024-12-06T08:14:11,974 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:14:11,974 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 1fe245b8d8375ab9b801a570a1b2b5e8: 2024-12-06T08:14:11,974 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:14:11,974 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:meta,,1.1588230740 2024-12-06T08:14:11,974 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnDatanodeDeath,,1733472805360.1fe245b8d8375ab9b801a570a1b2b5e8. 2024-12-06T08:14:12,082 INFO [regionserver/b6b797fc3981:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:14:12,087 INFO [regionserver/b6b797fc3981:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-06T08:14:12,087 INFO [regionserver/b6b797fc3981:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-06T08:14:12,143 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472804221 to hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/oldWALs/b6b797fc3981%2C44681%2C1733472803392.1733472804221 2024-12-06T08:14:12,144 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472828085 to hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/oldWALs/b6b797fc3981%2C44681%2C1733472803392.1733472828085 2024-12-06T08:14:12,145 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472832100 to hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/oldWALs/b6b797fc3981%2C44681%2C1733472803392.1733472832100 2024-12-06T08:14:12,146 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.1733472832322 to hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/oldWALs/b6b797fc3981%2C44681%2C1733472803392.1733472832322 2024-12-06T08:14:12,174 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1624): We were exiting though online regions are not empty, because some regions failed closing 2024-12-06T08:14:12,174 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,44681,1733472803392; all regions closed. 
2024-12-06T08:14:12,174 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392 2024-12-06T08:14:12,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39685 is added to blk_1073741866_1050 (size=93) 2024-12-06T08:14:12,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741866_1050 (size=93) 2024-12-06T08:14:12,983 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6eb6b0c5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39685, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=39645, infoSecurePort=0, ipcPort=32789, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741838_1014 to 127.0.0.1:32945 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:12,983 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@451ffe00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39685, datanodeUuid=633c6123-6914-48b4-b517-93772ad00228, infoPort=39645, infoSecurePort=0, ipcPort=32789, storageInfo=lv=-57;cid=testClusterID;nsid=828590894;c=1733472802617):Failed to transfer BP-1291778185-172.17.0.2-1733472802617:blk_1073741836_1012 to 127.0.0.1:44359 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:14:13,006 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@16e2dc1b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1291778185-172.17.0.2-1733472802617:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:39283,null,null]) java.net.ConnectException: Call From b6b797fc3981/172.17.0.2 to localhost:42185 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-06T08:14:13,324 INFO [regionserver/b6b797fc3981:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:14:13,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741830_1044 (size=47160) 2024-12-06T08:14:14,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39685 is added to blk_1073741857_1040 (size=1618) 2024-12-06T08:14:15,803 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta after 4002ms 2024-12-06T08:14:16,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:16,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:16,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:16,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:16,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:16,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:17,177 ERROR [WAL-Shutdown-0 {}] wal.FSHLog(508): We have waited 5 seconds but the close of writer(s) doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-06T08:14:17,178 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392 2024-12-06T08:14:17,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741862_1046 (size=13280) 2024-12-06T08:14:17,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39685 is added to blk_1073741862_1046 (size=13280) 2024-12-06T08:14:17,181 DEBUG [RS:0;b6b797fc3981:44681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:14:17,181 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:14:17,181 INFO [RS:0;b6b797fc3981:44681 {}] hbase.ChoreService(370): Chore service for: regionserver/b6b797fc3981:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T08:14:17,182 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-06T08:14:17,182 INFO [RS:0;b6b797fc3981:44681 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44681 2024-12-06T08:14:17,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b6b797fc3981,44681,1733472803392 2024-12-06T08:14:17,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:14:17,185 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b6b797fc3981,44681,1733472803392] 2024-12-06T08:14:17,185 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b6b797fc3981,44681,1733472803392; numProcessing=2 2024-12-06T08:14:17,187 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b6b797fc3981,44681,1733472803392 already deleted, retry=false 2024-12-06T08:14:17,187 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b6b797fc3981,44681,1733472803392 expired; onlineServers=0 2024-12-06T08:14:17,187 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,37831,1733472803338' ***** 2024-12-06T08:14:17,187 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T08:14:17,187 DEBUG [M:0;b6b797fc3981:37831 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@525d61f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:14:17,187 INFO [M:0;b6b797fc3981:37831 
{}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,37831,1733472803338 2024-12-06T08:14:17,187 INFO [M:0;b6b797fc3981:37831 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,37831,1733472803338; all regions closed. 2024-12-06T08:14:17,187 DEBUG [M:0;b6b797fc3981:37831 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:14:17,187 DEBUG [M:0;b6b797fc3981:37831 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T08:14:17,187 DEBUG [M:0;b6b797fc3981:37831 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T08:14:17,187 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-06T08:14:17,187 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472804003 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472804003,5,FailOnTimeoutGroup] 2024-12-06T08:14:17,187 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472804003 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472804003,5,FailOnTimeoutGroup] 2024-12-06T08:14:17,188 INFO [M:0;b6b797fc3981:37831 {}] hbase.ChoreService(370): Chore service for: master/b6b797fc3981:0 had [] on shutdown 2024-12-06T08:14:17,188 DEBUG [M:0;b6b797fc3981:37831 {}] master.HMaster(1733): Stopping service threads 2024-12-06T08:14:17,188 INFO [M:0;b6b797fc3981:37831 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T08:14:17,188 INFO [M:0;b6b797fc3981:37831 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T08:14:17,188 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-06T08:14:17,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T08:14:17,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:17,189 DEBUG [M:0;b6b797fc3981:37831 {}] zookeeper.ZKUtil(347): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T08:14:17,189 WARN [M:0;b6b797fc3981:37831 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T08:14:17,189 INFO [M:0;b6b797fc3981:37831 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T08:14:17,189 INFO [M:0;b6b797fc3981:37831 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T08:14:17,189 DEBUG [M:0;b6b797fc3981:37831 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:14:17,189 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:14:17,189 INFO [M:0;b6b797fc3981:37831 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:14:17,189 DEBUG [M:0;b6b797fc3981:37831 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:14:17,189 DEBUG [M:0;b6b797fc3981:37831 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:14:17,189 DEBUG [M:0;b6b797fc3981:37831 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T08:14:17,190 INFO [M:0;b6b797fc3981:37831 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.09 KB heapSize=49.30 KB 2024-12-06T08:14:17,206 DEBUG [M:0;b6b797fc3981:37831 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ebafcfcc10e44d94b84a63b3f99354a6 is 82, key is hbase:meta,,1/info:regioninfo/1733472804659/Put/seqid=0 2024-12-06T08:14:17,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741869_1054 (size=5672) 2024-12-06T08:14:17,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39685 is added to blk_1073741869_1054 (size=5672) 2024-12-06T08:14:17,213 INFO [M:0;b6b797fc3981:37831 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ebafcfcc10e44d94b84a63b3f99354a6 2024-12-06T08:14:17,234 DEBUG [M:0;b6b797fc3981:37831 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/838e12954506462bbdd271c826eb1d5f is 775, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733472805742/Put/seqid=0 2024-12-06T08:14:17,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39685 is added to blk_1073741870_1055 (size=7466) 2024-12-06T08:14:17,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741870_1055 (size=7466) 2024-12-06T08:14:17,240 INFO [M:0;b6b797fc3981:37831 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.42 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/838e12954506462bbdd271c826eb1d5f 2024-12-06T08:14:17,261 DEBUG [M:0;b6b797fc3981:37831 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a6460a26548a4f9bbc570fc2a2c70830 is 69, key is b6b797fc3981,44247,1733472805257/rs:state/1733472805305/Put/seqid=0 2024-12-06T08:14:17,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741871_1056 (size=5224) 2024-12-06T08:14:17,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39685 is added to blk_1073741871_1056 (size=5224) 2024-12-06T08:14:17,267 INFO [M:0;b6b797fc3981:37831 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a6460a26548a4f9bbc570fc2a2c70830 2024-12-06T08:14:17,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:14:17,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44681-0x100666392d80001, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:14:17,286 INFO [RS:0;b6b797fc3981:44681 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,44681,1733472803392; zookeeper connection closed. 2024-12-06T08:14:17,286 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5e28782 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5e28782 2024-12-06T08:14:17,286 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-06T08:14:17,287 DEBUG [M:0;b6b797fc3981:37831 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17e83827641543e0958d5699f1bba05d is 52, key is load_balancer_on/state:d/1733472805240/Put/seqid=0 2024-12-06T08:14:17,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741872_1057 (size=5056) 2024-12-06T08:14:17,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39685 is added to blk_1073741872_1057 (size=5056) 2024-12-06T08:14:17,292 INFO [M:0;b6b797fc3981:37831 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17e83827641543e0958d5699f1bba05d 2024-12-06T08:14:17,298 DEBUG [M:0;b6b797fc3981:37831 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ebafcfcc10e44d94b84a63b3f99354a6 as hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ebafcfcc10e44d94b84a63b3f99354a6 2024-12-06T08:14:17,304 INFO [M:0;b6b797fc3981:37831 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ebafcfcc10e44d94b84a63b3f99354a6, entries=8, sequenceid=97, filesize=5.5 K 2024-12-06T08:14:17,305 DEBUG [M:0;b6b797fc3981:37831 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/838e12954506462bbdd271c826eb1d5f as hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/838e12954506462bbdd271c826eb1d5f 2024-12-06T08:14:17,311 INFO [M:0;b6b797fc3981:37831 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/838e12954506462bbdd271c826eb1d5f, 
entries=11, sequenceid=97, filesize=7.3 K 2024-12-06T08:14:17,312 DEBUG [M:0;b6b797fc3981:37831 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a6460a26548a4f9bbc570fc2a2c70830 as hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a6460a26548a4f9bbc570fc2a2c70830 2024-12-06T08:14:17,318 INFO [M:0;b6b797fc3981:37831 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a6460a26548a4f9bbc570fc2a2c70830, entries=2, sequenceid=97, filesize=5.1 K 2024-12-06T08:14:17,319 DEBUG [M:0;b6b797fc3981:37831 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17e83827641543e0958d5699f1bba05d as hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/17e83827641543e0958d5699f1bba05d 2024-12-06T08:14:17,325 INFO [M:0;b6b797fc3981:37831 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/17e83827641543e0958d5699f1bba05d, entries=1, sequenceid=97, filesize=4.9 K 2024-12-06T08:14:17,326 INFO [M:0;b6b797fc3981:37831 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.09 KB/41051, heapSize ~49.23 KB/50416, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=97, compaction requested=false 2024-12-06T08:14:17,332 INFO [M:0;b6b797fc3981:37831 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:14:17,332 DEBUG [M:0;b6b797fc3981:37831 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:14:17,332 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/MasterData/WALs/b6b797fc3981,37831,1733472803338 2024-12-06T08:14:17,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35169 is added to blk_1073741860_1043 (size=757) 2024-12-06T08:14:17,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39685 is added to blk_1073741860_1043 (size=757) 2024-12-06T08:14:17,335 INFO [M:0;b6b797fc3981:37831 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-06T08:14:17,335 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T08:14:17,335 INFO [M:0;b6b797fc3981:37831 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:37831 2024-12-06T08:14:17,337 DEBUG [M:0;b6b797fc3981:37831 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b6b797fc3981,37831,1733472803338 already deleted, retry=false 2024-12-06T08:14:17,361 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T08:14:17,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:17,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:17,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:17,382 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:17,382 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:17,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:17,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:14:17,439 INFO [M:0;b6b797fc3981:37831 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,37831,1733472803338; zookeeper connection closed. 
2024-12-06T08:14:17,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37831-0x100666392d80000, quorum=127.0.0.1:55687, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:14:17,442 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e7d75d3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:14:17,442 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11623059{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:14:17,442 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:14:17,442 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ca57f19{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:14:17,442 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@560500bb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,STOPPED} 2024-12-06T08:14:17,443 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@148e333d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1291778185-172.17.0.2-1733472802617:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39283,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:42185 , LocalHost:localPort b6b797fc3981/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-06T08:14:17,444 WARN [BP-1291778185-172.17.0.2-1733472802617 heartbeating to localhost/127.0.0.1:44775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:14:17,444 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:14:17,444 WARN [BP-1291778185-172.17.0.2-1733472802617 heartbeating to localhost/127.0.0.1:44775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1291778185-172.17.0.2-1733472802617 (Datanode Uuid 633c6123-6914-48b4-b517-93772ad00228) service to localhost/127.0.0.1:44775 2024-12-06T08:14:17,444 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:14:17,445 ERROR [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@148e333d {}] datanode.DataNode(1743): Cannot find BPOfferService for reporting block received for bpid=BP-1291778185-172.17.0.2-1733472802617 2024-12-06T08:14:17,445 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data3/current/BP-1291778185-172.17.0.2-1733472802617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:14:17,445 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data4/current/BP-1291778185-172.17.0.2-1733472802617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:14:17,446 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:14:17,447 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@57fbd89{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:14:17,448 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e9e9343{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:14:17,448 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:14:17,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@548a9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:14:17,448 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@347096cd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,STOPPED} 2024-12-06T08:14:17,449 WARN [BP-1291778185-172.17.0.2-1733472802617 heartbeating to localhost/127.0.0.1:44775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:14:17,449 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:14:17,449 WARN [BP-1291778185-172.17.0.2-1733472802617 heartbeating to localhost/127.0.0.1:44775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1291778185-172.17.0.2-1733472802617 (Datanode Uuid 8df28f4f-c606-48f4-8fa4-384c429e5df5) service to localhost/127.0.0.1:44775 2024-12-06T08:14:17,450 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:14:17,450 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data7/current/BP-1291778185-172.17.0.2-1733472802617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:14:17,450 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/cluster_fed8da4d-c68a-3847-e464-f4e370a03985/dfs/data/data8/current/BP-1291778185-172.17.0.2-1733472802617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:14:17,450 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:14:17,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b038f36{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:14:17,457 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34202dec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:14:17,457 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:14:17,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@411ed8c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:14:17,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6be9adb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir/,STOPPED} 2024-12-06T08:14:17,465 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-06T08:14:17,495 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-06T08:14:17,503 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=86 (was 62) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:44775 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44775 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44775 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-12-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-12-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:44775 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44775 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:44775 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) 
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$$Lambda$796/0x00007f8f4cb88db0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RS-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44775 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:44775 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RS-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-13-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:44775 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-12-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-13-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-13-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=428 (was 401) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=130 (was 176), ProcessCount=11 (was 11), AvailableMemoryMB=8119 (was 8496) 2024-12-06T08:14:17,509 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=86, OpenFileDescriptor=428, MaxFileDescriptor=1048576, SystemLoadAverage=130, ProcessCount=11, AvailableMemoryMB=8119 2024-12-06T08:14:17,509 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T08:14:17,509 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.log.dir so I do NOT create it in target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5 2024-12-06T08:14:17,509 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/145d2a6a-24dc-6e8a-fa9d-f81894b8eca4/hadoop.tmp.dir so I do NOT create it in target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5 2024-12-06T08:14:17,509 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967, deleteOnExit=true 2024-12-06T08:14:17,509 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T08:14:17,509 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/test.cache.data in system properties and HBase conf 2024-12-06T08:14:17,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T08:14:17,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir in system properties and HBase conf 2024-12-06T08:14:17,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T08:14:17,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T08:14:17,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T08:14:17,510 DEBUG [Time-limited test {}] 
fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-06T08:14:17,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:14:17,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:14:17,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T08:14:17,510 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:14:17,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T08:14:17,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T08:14:17,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:14:17,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:14:17,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T08:14:17,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/nfs.dump.dir in system properties and HBase conf 2024-12-06T08:14:17,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/java.io.tmpdir in system properties and HBase conf 2024-12-06T08:14:17,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:14:17,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T08:14:17,511 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T08:14:17,525 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T08:14:17,598 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:14:17,606 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:14:17,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:14:17,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:14:17,607 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:14:17,609 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:14:17,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@253120b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:14:17,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@579b5c9d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:14:17,734 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@340ac765{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/java.io.tmpdir/jetty-localhost-45817-hadoop-hdfs-3_4_1-tests_jar-_-any-984350670426484374/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:14:17,735 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39e2672a{HTTP/1.1, (http/1.1)}{localhost:45817} 2024-12-06T08:14:17,735 INFO [Time-limited test {}] server.Server(415): Started @172117ms 2024-12-06T08:14:17,748 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T08:14:17,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:17,823 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:14:17,828 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:14:17,829 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:14:17,829 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:14:17,829 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:14:17,829 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63044f0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:14:17,830 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e67e938{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:14:17,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@31fce438{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/java.io.tmpdir/jetty-localhost-42253-hadoop-hdfs-3_4_1-tests_jar-_-any-9981257340391388479/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:14:17,949 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c2473d5{HTTP/1.1, (http/1.1)}{localhost:42253} 2024-12-06T08:14:17,949 INFO [Time-limited test {}] server.Server(415): Started @172332ms 2024-12-06T08:14:17,951 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:14:17,983 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:14:17,986 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:14:17,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:14:17,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:14:17,988 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:14:17,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64b5487{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:14:17,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3800cd12{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:14:18,041 WARN [Thread-924 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data2/current/BP-775885647-172.17.0.2-1733472857543/current, will proceed with Du for space computation calculation, 2024-12-06T08:14:18,041 WARN [Thread-923 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data1/current/BP-775885647-172.17.0.2-1733472857543/current, will proceed with Du for space computation calculation, 2024-12-06T08:14:18,064 WARN [Thread-902 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T08:14:18,067 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x11a12935f5274657 with lease ID 0x98969c9208cf92c3: Processing first storage report for DS-105e7187-8301-43c9-93e5-849e3a6c177a from datanode DatanodeRegistration(127.0.0.1:35661, datanodeUuid=347734b2-08ee-4766-a60a-25af9c6395cb, infoPort=39281, infoSecurePort=0, ipcPort=36897, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543) 2024-12-06T08:14:18,067 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x11a12935f5274657 with lease ID 0x98969c9208cf92c3: from storage DS-105e7187-8301-43c9-93e5-849e3a6c177a node DatanodeRegistration(127.0.0.1:35661, datanodeUuid=347734b2-08ee-4766-a60a-25af9c6395cb, infoPort=39281, infoSecurePort=0, ipcPort=36897, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:14:18,067 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x11a12935f5274657 with lease ID 0x98969c9208cf92c3: Processing first storage report for DS-e5813ee0-50f6-405a-94c6-23559b5d6037 from datanode DatanodeRegistration(127.0.0.1:35661, datanodeUuid=347734b2-08ee-4766-a60a-25af9c6395cb, infoPort=39281, infoSecurePort=0, ipcPort=36897, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543) 2024-12-06T08:14:18,067 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x11a12935f5274657 with lease ID 0x98969c9208cf92c3: from storage DS-e5813ee0-50f6-405a-94c6-23559b5d6037 node DatanodeRegistration(127.0.0.1:35661, datanodeUuid=347734b2-08ee-4766-a60a-25af9c6395cb, infoPort=39281, infoSecurePort=0, ipcPort=36897, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:14:18,107 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d898677{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/java.io.tmpdir/jetty-localhost-46869-hadoop-hdfs-3_4_1-tests_jar-_-any-9130443206315585837/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:14:18,107 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51324347{HTTP/1.1, (http/1.1)}{localhost:46869} 2024-12-06T08:14:18,107 INFO [Time-limited test {}] server.Server(415): Started @172490ms 2024-12-06T08:14:18,109 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
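For context, the sequence recorded above (test directories redirected under target/test-data, an in-process NameNode and two DataNodes behind Jetty, first block reports) is what HBaseTestingUtility performs when a test asks for a mini HDFS cluster. A minimal sketch of such a test driver, assuming the standard HBaseTestingUtility API from the hbase-server test jar (this sketch is not part of the original log):

// Minimal sketch, assuming org.apache.hadoop.hbase.HBaseTestingUtility is on the classpath.
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Start an in-process HDFS with two DataNodes; paths such as hadoop.log.dir and
    // java.io.tmpdir are redirected under target/test-data, as in the log lines above.
    util.startMiniDFSCluster(2);
    // Start a MiniZooKeeperCluster on an ephemeral client port.
    util.startMiniZKCluster();
    try {
      // ... test body would go here ...
    } finally {
      util.shutdownMiniZKCluster();
      util.shutdownMiniDFSCluster();
    }
  }
}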
2024-12-06T08:14:18,202 WARN [Thread-949 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data3/current/BP-775885647-172.17.0.2-1733472857543/current, will proceed with Du for space computation calculation, 2024-12-06T08:14:18,202 WARN [Thread-950 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data4/current/BP-775885647-172.17.0.2-1733472857543/current, will proceed with Du for space computation calculation, 2024-12-06T08:14:18,218 WARN [Thread-938 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:14:18,221 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xba869b6a3565c228 with lease ID 0x98969c9208cf92c4: Processing first storage report for DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad from datanode DatanodeRegistration(127.0.0.1:34387, datanodeUuid=fbbf122c-11f0-4f1e-89c7-afcec1c3fd0e, infoPort=33873, infoSecurePort=0, ipcPort=41979, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543) 2024-12-06T08:14:18,221 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xba869b6a3565c228 with lease ID 0x98969c9208cf92c4: from storage DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad node DatanodeRegistration(127.0.0.1:34387, datanodeUuid=fbbf122c-11f0-4f1e-89c7-afcec1c3fd0e, infoPort=33873, infoSecurePort=0, ipcPort=41979, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:14:18,221 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xba869b6a3565c228 with lease ID 0x98969c9208cf92c4: Processing first storage report for DS-e1b9cf0a-2e67-40bc-a296-4e5b76d1358a from datanode DatanodeRegistration(127.0.0.1:34387, datanodeUuid=fbbf122c-11f0-4f1e-89c7-afcec1c3fd0e, infoPort=33873, infoSecurePort=0, ipcPort=41979, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543) 2024-12-06T08:14:18,221 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xba869b6a3565c228 with lease ID 0x98969c9208cf92c4: from storage DS-e1b9cf0a-2e67-40bc-a296-4e5b76d1358a node DatanodeRegistration(127.0.0.1:34387, datanodeUuid=fbbf122c-11f0-4f1e-89c7-afcec1c3fd0e, infoPort=33873, infoSecurePort=0, ipcPort=41979, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:14:18,231 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5 2024-12-06T08:14:18,234 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/zookeeper_0, clientPort=53536, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T08:14:18,235 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=53536 2024-12-06T08:14:18,235 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:14:18,236 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:14:18,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:14:18,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34387 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:14:18,247 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548 with version=8 2024-12-06T08:14:18,247 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/hbase-staging 2024-12-06T08:14:18,249 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:14:18,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:14:18,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:14:18,249 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:14:18,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:14:18,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:14:18,249 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:14:18,249 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:14:18,250 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35039 2024-12-06T08:14:18,250 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:14:18,251 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:14:18,254 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35039 connecting to ZooKeeper ensemble=127.0.0.1:53536 2024-12-06T08:14:18,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:350390x0, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:14:18,259 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35039-0x100666469600000 connected 2024-12-06T08:14:18,273 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:14:18,273 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:14:18,274 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:14:18,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35039 2024-12-06T08:14:18,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35039 2024-12-06T08:14:18,280 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35039 2024-12-06T08:14:18,281 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35039 2024-12-06T08:14:18,281 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35039 2024-12-06T08:14:18,284 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548, hbase.cluster.distributed=false 2024-12-06T08:14:18,308 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:14:18,309 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:14:18,309 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:14:18,309 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:14:18,309 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:14:18,309 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:14:18,309 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:14:18,309 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:14:18,310 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33733 2024-12-06T08:14:18,310 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:14:18,311 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:14:18,312 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:14:18,314 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:14:18,317 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33733 connecting to ZooKeeper ensemble=127.0.0.1:53536 2024-12-06T08:14:18,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:337330x0, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:14:18,320 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:337330x0, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:14:18,320 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33733-0x100666469600001 connected 2024-12-06T08:14:18,321 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:14:18,321 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:14:18,321 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33733 2024-12-06T08:14:18,321 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33733 2024-12-06T08:14:18,322 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33733 2024-12-06T08:14:18,322 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33733 2024-12-06T08:14:18,322 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33733 2024-12-06T08:14:18,323 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b6b797fc3981,35039,1733472858248 2024-12-06T08:14:18,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:14:18,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:14:18,325 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b6b797fc3981,35039,1733472858248 2024-12-06T08:14:18,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:14:18,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:14:18,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:18,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:18,328 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:14:18,328 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b6b797fc3981,35039,1733472858248 from backup master directory 2024-12-06T08:14:18,329 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:14:18,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b6b797fc3981,35039,1733472858248 2024-12-06T08:14:18,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:14:18,330 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be 
cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:14:18,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:14:18,330 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b6b797fc3981,35039,1733472858248 2024-12-06T08:14:18,337 DEBUG [M:0;b6b797fc3981:35039 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b6b797fc3981:35039 2024-12-06T08:14:18,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34387 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:14:18,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:14:18,344 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/hbase.id with ID: bb92d057-b890-45ae-b8d0-be29c2cc314b 2024-12-06T08:14:18,355 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:14:18,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:18,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:18,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:14:18,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34387 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:14:18,366 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 
'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:14:18,367 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T08:14:18,367 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:14:18,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:14:18,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34387 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:14:18,375 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store 2024-12-06T08:14:18,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34387 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:14:18,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:14:18,381 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:14:18,382 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:14:18,382 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:14:18,382 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:14:18,382 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:14:18,382 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:14:18,382 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:14:18,382 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:14:18,383 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/.initializing 2024-12-06T08:14:18,383 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/WALs/b6b797fc3981,35039,1733472858248 2024-12-06T08:14:18,386 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C35039%2C1733472858248, suffix=, logDir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/WALs/b6b797fc3981,35039,1733472858248, archiveDir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/oldWALs, maxLogs=10 2024-12-06T08:14:18,386 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C35039%2C1733472858248.1733472858386 2024-12-06T08:14:18,391 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/WALs/b6b797fc3981,35039,1733472858248/b6b797fc3981%2C35039%2C1733472858248.1733472858386 2024-12-06T08:14:18,391 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33873:33873),(127.0.0.1/127.0.0.1:39281:39281)] 2024-12-06T08:14:18,391 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:14:18,391 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:14:18,391 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:14:18,391 DEBUG [master/b6b797fc3981:0:becomeActiveMaster 
{}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:14:18,393 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:14:18,394 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T08:14:18,394 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:18,394 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:14:18,395 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:14:18,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T08:14:18,396 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:18,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:14:18,396 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:14:18,397 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T08:14:18,397 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:18,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:14:18,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:14:18,399 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T08:14:18,399 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:18,400 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:14:18,400 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:14:18,401 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:14:18,402 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T08:14:18,403 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:14:18,405 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:14:18,406 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=768986, jitterRate=-0.022184208035469055}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:14:18,406 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:14:18,407 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T08:14:18,410 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5058c952, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:14:18,411 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-06T08:14:18,411 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T08:14:18,411 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T08:14:18,411 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
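The entries above show the master coming up against the mini ZooKeeper and HDFS clusters and bootstrapping its local 'master:store' region. A test that exercises this path usually just starts the full mini cluster and talks to it through the normal client API; a minimal sketch, assuming the standard HBase 2.x HBaseTestingUtility and client classes (the table name "demo" is illustrative and not taken from the log):

// Minimal sketch, assuming the hbase-server test jar and hbase-client are available.
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterTableSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(); // one master and one region server, as in the startup sequence logged above
    try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
         Admin admin = conn.getAdmin()) {
      // Create a small table; the column-family options mirror the style of the
      // descriptors logged for master:store (e.g. VERSIONS => '3').
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)
              .build())
          .build());
    } finally {
      util.shutdownMiniCluster();
    }
  }
}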
2024-12-06T08:14:18,411 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T08:14:18,412 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-06T08:14:18,412 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T08:14:18,413 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T08:14:18,414 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T08:14:18,415 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T08:14:18,416 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T08:14:18,416 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T08:14:18,417 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T08:14:18,417 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T08:14:18,418 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T08:14:18,420 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T08:14:18,420 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T08:14:18,421 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T08:14:18,422 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T08:14:18,424 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T08:14:18,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-06T08:14:18,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:14:18,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:18,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:18,425 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b6b797fc3981,35039,1733472858248, sessionid=0x100666469600000, setting cluster-up flag (Was=false) 2024-12-06T08:14:18,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:18,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:18,433 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T08:14:18,434 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,35039,1733472858248 2024-12-06T08:14:18,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:18,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:18,441 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T08:14:18,442 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,35039,1733472858248 2024-12-06T08:14:18,444 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T08:14:18,445 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T08:14:18,445 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, 
RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T08:14:18,445 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b6b797fc3981,35039,1733472858248 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T08:14:18,445 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:14:18,445 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:14:18,445 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:14:18,445 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:14:18,445 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b6b797fc3981:0, corePoolSize=10, maxPoolSize=10 2024-12-06T08:14:18,446 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:14:18,446 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:14:18,446 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:14:18,448 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:14:18,448 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T08:14:18,449 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733472888449 2024-12-06T08:14:18,449 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T08:14:18,449 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T08:14:18,449 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T08:14:18,449 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T08:14:18,449 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T08:14:18,449 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T08:14:18,450 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:18,450 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:18,450 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:14:18,450 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T08:14:18,450 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T08:14:18,450 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T08:14:18,451 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T08:14:18,451 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T08:14:18,451 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472858451,5,FailOnTimeoutGroup] 2024-12-06T08:14:18,451 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472858451,5,FailOnTimeoutGroup] 2024-12-06T08:14:18,451 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:18,452 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T08:14:18,452 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:18,452 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:18,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:14:18,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34387 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:14:18,458 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T08:14:18,458 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548 2024-12-06T08:14:18,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:14:18,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34387 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:14:18,465 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:14:18,466 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:14:18,467 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:14:18,468 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:18,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:14:18,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:14:18,470 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:14:18,470 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:18,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:14:18,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:14:18,472 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:14:18,472 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:18,472 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:14:18,473 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/meta/1588230740 2024-12-06T08:14:18,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/meta/1588230740 2024-12-06T08:14:18,475 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:14:18,477 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:14:18,479 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:14:18,480 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856985, jitterRate=0.08971326053142548}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:14:18,480 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:14:18,480 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:14:18,480 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:14:18,480 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:14:18,480 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:14:18,480 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:14:18,480 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T08:14:18,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:14:18,482 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:14:18,482 INFO [PEWorker-1 
{}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T08:14:18,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T08:14:18,483 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T08:14:18,484 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T08:14:18,535 DEBUG [RS:0;b6b797fc3981:33733 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b6b797fc3981:33733 2024-12-06T08:14:18,537 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1008): ClusterId : bb92d057-b890-45ae-b8d0-be29c2cc314b 2024-12-06T08:14:18,537 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:14:18,539 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:14:18,539 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:14:18,541 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:14:18,541 DEBUG [RS:0;b6b797fc3981:33733 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1fc387, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:14:18,542 DEBUG [RS:0;b6b797fc3981:33733 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33ce5274, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:14:18,542 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T08:14:18,542 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T08:14:18,542 DEBUG [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-06T08:14:18,543 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(3073): reportForDuty to master=b6b797fc3981,35039,1733472858248 with isa=b6b797fc3981/172.17.0.2:33733, startcode=1733472858308 2024-12-06T08:14:18,543 DEBUG [RS:0;b6b797fc3981:33733 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:14:18,545 INFO [RS-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54303, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:14:18,545 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35039 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b6b797fc3981,33733,1733472858308 2024-12-06T08:14:18,545 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35039 {}] master.ServerManager(486): Registering regionserver=b6b797fc3981,33733,1733472858308 2024-12-06T08:14:18,547 DEBUG [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548 2024-12-06T08:14:18,547 DEBUG [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38689 2024-12-06T08:14:18,547 DEBUG [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T08:14:18,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:14:18,549 DEBUG [RS:0;b6b797fc3981:33733 {}] zookeeper.ZKUtil(111): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b6b797fc3981,33733,1733472858308 2024-12-06T08:14:18,549 WARN [RS:0;b6b797fc3981:33733 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T08:14:18,549 INFO [RS:0;b6b797fc3981:33733 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:14:18,550 DEBUG [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308 2024-12-06T08:14:18,550 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b6b797fc3981,33733,1733472858308] 2024-12-06T08:14:18,553 DEBUG [RS:0;b6b797fc3981:33733 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T08:14:18,553 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:14:18,554 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:14:18,555 INFO [RS:0;b6b797fc3981:33733 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:14:18,555 INFO [RS:0;b6b797fc3981:33733 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:18,555 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T08:14:18,556 INFO [RS:0;b6b797fc3981:33733 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T08:14:18,556 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:14:18,556 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:14:18,556 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:14:18,556 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:14:18,556 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:14:18,556 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:14:18,556 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:14:18,556 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:14:18,556 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:14:18,556 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:14:18,556 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:14:18,556 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:14:18,557 DEBUG [RS:0;b6b797fc3981:33733 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:14:18,557 INFO [RS:0;b6b797fc3981:33733 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:18,557 INFO [RS:0;b6b797fc3981:33733 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:18,557 INFO [RS:0;b6b797fc3981:33733 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:18,557 INFO [RS:0;b6b797fc3981:33733 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:18,557 INFO [RS:0;b6b797fc3981:33733 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,33733,1733472858308-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-06T08:14:18,572 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:14:18,572 INFO [RS:0;b6b797fc3981:33733 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,33733,1733472858308-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:18,586 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.Replication(204): b6b797fc3981,33733,1733472858308 started 2024-12-06T08:14:18,586 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1767): Serving as b6b797fc3981,33733,1733472858308, RpcServer on b6b797fc3981/172.17.0.2:33733, sessionid=0x100666469600001 2024-12-06T08:14:18,586 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:14:18,586 DEBUG [RS:0;b6b797fc3981:33733 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b6b797fc3981,33733,1733472858308 2024-12-06T08:14:18,586 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,33733,1733472858308' 2024-12-06T08:14:18,586 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:14:18,587 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:14:18,587 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:14:18,587 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:14:18,587 DEBUG [RS:0;b6b797fc3981:33733 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b6b797fc3981,33733,1733472858308 2024-12-06T08:14:18,587 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,33733,1733472858308' 2024-12-06T08:14:18,587 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:14:18,587 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:14:18,588 DEBUG [RS:0;b6b797fc3981:33733 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:14:18,588 INFO [RS:0;b6b797fc3981:33733 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:14:18,588 INFO [RS:0;b6b797fc3981:33733 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T08:14:18,634 WARN [b6b797fc3981:35039 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-06T08:14:18,690 INFO [RS:0;b6b797fc3981:33733 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C33733%2C1733472858308, suffix=, logDir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308, archiveDir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/oldWALs, maxLogs=32 2024-12-06T08:14:18,691 INFO [RS:0;b6b797fc3981:33733 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C33733%2C1733472858308.1733472858691 2024-12-06T08:14:18,698 INFO [RS:0;b6b797fc3981:33733 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 2024-12-06T08:14:18,698 DEBUG [RS:0;b6b797fc3981:33733 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33873:33873),(127.0.0.1/127.0.0.1:39281:39281)] 2024-12-06T08:14:18,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:14:18,885 DEBUG [b6b797fc3981:35039 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T08:14:18,885 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b6b797fc3981,33733,1733472858308 2024-12-06T08:14:18,886 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,33733,1733472858308, state=OPENING 2024-12-06T08:14:18,888 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T08:14:18,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:18,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:18,890 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b6b797fc3981,33733,1733472858308}] 2024-12-06T08:14:18,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:14:18,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:14:19,043 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,33733,1733472858308 2024-12-06T08:14:19,043 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T08:14:19,045 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37742, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T08:14:19,049 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T08:14:19,049 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:14:19,051 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C33733%2C1733472858308.meta, suffix=.meta, logDir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308, archiveDir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/oldWALs, maxLogs=32 2024-12-06T08:14:19,052 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C33733%2C1733472858308.meta.1733472859052.meta 2024-12-06T08:14:19,058 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL 
/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.meta.1733472859052.meta 2024-12-06T08:14:19,059 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39281:39281),(127.0.0.1/127.0.0.1:33873:33873)] 2024-12-06T08:14:19,059 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:14:19,059 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T08:14:19,059 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T08:14:19,059 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-06T08:14:19,059 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T08:14:19,060 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:14:19,060 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T08:14:19,060 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T08:14:19,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:14:19,062 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:14:19,062 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:14:19,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:14:19,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:14:19,064 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:14:19,064 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:19,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:14:19,064 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:14:19,065 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:14:19,065 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:19,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:14:19,066 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) 
under hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/meta/1588230740 2024-12-06T08:14:19,067 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/meta/1588230740 2024-12-06T08:14:19,069 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:14:19,070 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:14:19,071 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835375, jitterRate=0.062235504388809204}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:14:19,071 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:14:19,072 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733472859043 2024-12-06T08:14:19,074 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T08:14:19,074 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T08:14:19,075 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,33733,1733472858308 2024-12-06T08:14:19,076 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,33733,1733472858308, state=OPEN 2024-12-06T08:14:19,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:14:19,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:14:19,080 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:14:19,080 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:14:19,083 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T08:14:19,083 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, 
state=SUCCESS; OpenRegionProcedure 1588230740, server=b6b797fc3981,33733,1733472858308 in 190 msec 2024-12-06T08:14:19,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T08:14:19,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 601 msec 2024-12-06T08:14:19,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 642 msec 2024-12-06T08:14:19,087 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733472859087, completionTime=-1 2024-12-06T08:14:19,087 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T08:14:19,087 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T08:14:19,088 DEBUG [hconnection-0x326608b0-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:14:19,089 INFO [RS-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37748, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:14:19,090 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T08:14:19,090 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733472919090 2024-12-06T08:14:19,090 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733472979090 2024-12-06T08:14:19,090 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 3 msec 2024-12-06T08:14:19,096 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,35039,1733472858248-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:19,096 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,35039,1733472858248-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:19,096 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,35039,1733472858248-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:19,096 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b6b797fc3981:35039, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:19,096 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T08:14:19,096 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. 
Creating... 2024-12-06T08:14:19,096 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:14:19,097 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T08:14:19,098 DEBUG [master/b6b797fc3981:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T08:14:19,098 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:14:19,098 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:19,099 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:14:19,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34387 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:14:19,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:14:19,108 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 51cfbf71f3654e0c90300f77ec1fcadd, NAME => 'hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548 2024-12-06T08:14:19,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34387 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:14:19,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:14:19,114 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:14:19,114 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 51cfbf71f3654e0c90300f77ec1fcadd, disabling compactions & flushes 2024-12-06T08:14:19,114 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. 2024-12-06T08:14:19,115 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. 2024-12-06T08:14:19,115 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. after waiting 0 ms 2024-12-06T08:14:19,115 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. 2024-12-06T08:14:19,115 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. 2024-12-06T08:14:19,115 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 51cfbf71f3654e0c90300f77ec1fcadd: 2024-12-06T08:14:19,116 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:14:19,116 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733472859116"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733472859116"}]},"ts":"1733472859116"} 2024-12-06T08:14:19,118 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T08:14:19,119 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:14:19,119 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472859119"}]},"ts":"1733472859119"} 2024-12-06T08:14:19,120 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T08:14:19,125 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=51cfbf71f3654e0c90300f77ec1fcadd, ASSIGN}] 2024-12-06T08:14:19,126 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=51cfbf71f3654e0c90300f77ec1fcadd, ASSIGN 2024-12-06T08:14:19,127 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=51cfbf71f3654e0c90300f77ec1fcadd, ASSIGN; state=OFFLINE, location=b6b797fc3981,33733,1733472858308; forceNewPlan=false, retain=false 2024-12-06T08:14:19,277 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=51cfbf71f3654e0c90300f77ec1fcadd, regionState=OPENING, regionLocation=b6b797fc3981,33733,1733472858308 2024-12-06T08:14:19,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 51cfbf71f3654e0c90300f77ec1fcadd, server=b6b797fc3981,33733,1733472858308}] 2024-12-06T08:14:19,432 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,33733,1733472858308 2024-12-06T08:14:19,436 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. 2024-12-06T08:14:19,436 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 51cfbf71f3654e0c90300f77ec1fcadd, NAME => 'hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:14:19,437 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 51cfbf71f3654e0c90300f77ec1fcadd 2024-12-06T08:14:19,437 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:14:19,437 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 51cfbf71f3654e0c90300f77ec1fcadd 2024-12-06T08:14:19,437 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 51cfbf71f3654e0c90300f77ec1fcadd 2024-12-06T08:14:19,438 INFO [StoreOpener-51cfbf71f3654e0c90300f77ec1fcadd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 51cfbf71f3654e0c90300f77ec1fcadd 2024-12-06T08:14:19,440 INFO [StoreOpener-51cfbf71f3654e0c90300f77ec1fcadd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51cfbf71f3654e0c90300f77ec1fcadd columnFamilyName info 2024-12-06T08:14:19,440 DEBUG [StoreOpener-51cfbf71f3654e0c90300f77ec1fcadd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:19,440 INFO [StoreOpener-51cfbf71f3654e0c90300f77ec1fcadd-1 {}] regionserver.HStore(327): Store=51cfbf71f3654e0c90300f77ec1fcadd/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:14:19,441 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/namespace/51cfbf71f3654e0c90300f77ec1fcadd 2024-12-06T08:14:19,442 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/namespace/51cfbf71f3654e0c90300f77ec1fcadd 2024-12-06T08:14:19,444 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 51cfbf71f3654e0c90300f77ec1fcadd 2024-12-06T08:14:19,446 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/namespace/51cfbf71f3654e0c90300f77ec1fcadd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:14:19,446 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 51cfbf71f3654e0c90300f77ec1fcadd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=823042, jitterRate=0.04655282199382782}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:14:19,447 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 51cfbf71f3654e0c90300f77ec1fcadd: 2024-12-06T08:14:19,447 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd., pid=6, masterSystemTime=1733472859432 2024-12-06T08:14:19,450 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. 2024-12-06T08:14:19,450 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. 
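The `MetaTableAccessor` entries earlier in this sequence that print `Put {"totalColumns":2,"row":"hbase:namespace,,...` are ordinary HBase `Put` mutations the master writes into the `hbase:meta` catalog table (family `info`, qualifiers such as `regioninfo` and `state`). The snippet below is only a rough, hypothetical illustration of that mutation shape through the public client API: the row key is copied from the log, the cell value is simplified, and in reality the master performs these writes internally via `MetaTableAccessor` rather than through client code like this.

```java
import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaPutSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.valueOf("hbase:meta"))) {
      // Row key copied from the log: the region name of the hbase:namespace region.
      byte[] row = Bytes.toBytes("hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd.");
      Put put = new Put(row)
          // info:state holds the region state string; the value here is illustrative only.
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"), Bytes.toBytes("OPEN"));
      meta.put(put);
    }
  }
}
```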
2024-12-06T08:14:19,450 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=51cfbf71f3654e0c90300f77ec1fcadd, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,33733,1733472858308 2024-12-06T08:14:19,454 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T08:14:19,454 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 51cfbf71f3654e0c90300f77ec1fcadd, server=b6b797fc3981,33733,1733472858308 in 172 msec 2024-12-06T08:14:19,456 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T08:14:19,456 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=51cfbf71f3654e0c90300f77ec1fcadd, ASSIGN in 329 msec 2024-12-06T08:14:19,457 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:14:19,457 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472859457"}]},"ts":"1733472859457"} 2024-12-06T08:14:19,459 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T08:14:19,461 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:14:19,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 365 msec 2024-12-06T08:14:19,498 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T08:14:19,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:19,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:14:19,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:14:19,505 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T08:14:19,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:14:19,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 11 msec 2024-12-06T08:14:19,527 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T08:14:19,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:14:19,538 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 10 msec 2024-12-06T08:14:19,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T08:14:19,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T08:14:19,554 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.224sec 2024-12-06T08:14:19,554 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T08:14:19,554 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T08:14:19,554 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T08:14:19,554 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T08:14:19,554 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T08:14:19,554 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,35039,1733472858248-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:14:19,554 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,35039,1733472858248-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T08:14:19,556 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T08:14:19,556 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T08:14:19,556 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,35039,1733472858248-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T08:14:19,625 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x227fb64a to 127.0.0.1:53536 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1507a66b 2024-12-06T08:14:19,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e49506e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:14:19,631 DEBUG [hconnection-0x76558132-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:14:19,632 INFO [RS-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37758, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:14:19,634 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b6b797fc3981,35039,1733472858248 2024-12-06T08:14:19,634 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:14:19,638 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-06T08:14:19,638 INFO [Time-limited test {}] wal.TestLogRolling(297): Starting testLogRollOnPipelineRestart 2024-12-06T08:14:19,638 INFO [Time-limited test {}] wal.TestLogRolling(300): Replication=2 2024-12-06T08:14:19,639 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:14:19,641 INFO [RS-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51168, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:14:19,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35039 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T08:14:19,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35039 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-06T08:14:19,642 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35039 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:14:19,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35039 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-06T08:14:19,644 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:14:19,644 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:19,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35039 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 9 2024-12-06T08:14:19,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35039 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:14:19,645 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:14:19,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741837_1013 (size=395) 2024-12-06T08:14:19,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34387 is added to blk_1073741837_1013 (size=395) 2024-12-06T08:14:19,654 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 15be0fc643a8e496cf59a78a3bbf8d9b, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548 2024-12-06T08:14:19,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34387 is added to blk_1073741838_1014 (size=78) 2024-12-06T08:14:19,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35661 is added to blk_1073741838_1014 (size=78) 2024-12-06T08:14:19,661 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:14:19,661 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1681): Closing 15be0fc643a8e496cf59a78a3bbf8d9b, disabling compactions & flushes 2024-12-06T08:14:19,661 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:14:19,661 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:14:19,661 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. after waiting 0 ms 2024-12-06T08:14:19,661 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:14:19,661 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:14:19,661 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1635): Region close journal for 15be0fc643a8e496cf59a78a3bbf8d9b: 2024-12-06T08:14:19,662 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:14:19,663 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733472859662"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733472859662"}]},"ts":"1733472859662"} 2024-12-06T08:14:19,664 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
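The `create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', VERSIONS => '1', ... BLOCKSIZE => '65536 B (64KB)'}` request handled above as pid=9 corresponds to a standard HBase 2.x Admin call. Below is a minimal, hypothetical sketch of the equivalent client-side code, using only the table name, column family, version count and block size visible in the log; everything else is left at defaults. The small `hbase.hregion.max.filesize` (786432) and `hbase.hregion.memstore.flush.size` (8192) values flagged by `TableDescriptorChecker` are presumably deliberate minicluster settings intended to force frequent flushes and log rolls in this test.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Table and column-family settings taken from the create request in the log:
      // one version kept, 64 KB block size, no compression or encoding (defaults).
      admin.createTable(
          TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
              .setColumnFamily(
                  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                      .setMaxVersions(1)
                      .setBlocksize(64 * 1024)
                      .build())
              .build());
      // createTable() blocks until the master's CreateTableProcedure (pid=9 above)
      // reaches SUCCESS, which is what the later "procId: 9 completed" line reports.
    }
  }
}
```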
2024-12-06T08:14:19,665 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:14:19,665 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472859665"}]},"ts":"1733472859665"} 2024-12-06T08:14:19,667 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-06T08:14:19,671 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=15be0fc643a8e496cf59a78a3bbf8d9b, ASSIGN}] 2024-12-06T08:14:19,672 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=15be0fc643a8e496cf59a78a3bbf8d9b, ASSIGN 2024-12-06T08:14:19,673 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=15be0fc643a8e496cf59a78a3bbf8d9b, ASSIGN; state=OFFLINE, location=b6b797fc3981,33733,1733472858308; forceNewPlan=false, retain=false 2024-12-06T08:14:19,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:19,823 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=15be0fc643a8e496cf59a78a3bbf8d9b, regionState=OPENING, regionLocation=b6b797fc3981,33733,1733472858308 2024-12-06T08:14:19,826 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 15be0fc643a8e496cf59a78a3bbf8d9b, server=b6b797fc3981,33733,1733472858308}] 2024-12-06T08:14:19,978 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,33733,1733472858308 2024-12-06T08:14:19,982 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:14:19,983 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 15be0fc643a8e496cf59a78a3bbf8d9b, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:14:19,983 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 15be0fc643a8e496cf59a78a3bbf8d9b 2024-12-06T08:14:19,983 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:14:19,983 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 15be0fc643a8e496cf59a78a3bbf8d9b 2024-12-06T08:14:19,983 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 15be0fc643a8e496cf59a78a3bbf8d9b 2024-12-06T08:14:19,985 INFO [StoreOpener-15be0fc643a8e496cf59a78a3bbf8d9b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 15be0fc643a8e496cf59a78a3bbf8d9b 2024-12-06T08:14:19,986 INFO [StoreOpener-15be0fc643a8e496cf59a78a3bbf8d9b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 15be0fc643a8e496cf59a78a3bbf8d9b columnFamilyName info 2024-12-06T08:14:19,986 DEBUG [StoreOpener-15be0fc643a8e496cf59a78a3bbf8d9b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:14:19,987 INFO [StoreOpener-15be0fc643a8e496cf59a78a3bbf8d9b-1 {}] regionserver.HStore(327): Store=15be0fc643a8e496cf59a78a3bbf8d9b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:14:19,988 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/default/TestLogRolling-testLogRollOnPipelineRestart/15be0fc643a8e496cf59a78a3bbf8d9b 2024-12-06T08:14:19,988 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/default/TestLogRolling-testLogRollOnPipelineRestart/15be0fc643a8e496cf59a78a3bbf8d9b 2024-12-06T08:14:19,990 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 15be0fc643a8e496cf59a78a3bbf8d9b 2024-12-06T08:14:19,992 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/default/TestLogRolling-testLogRollOnPipelineRestart/15be0fc643a8e496cf59a78a3bbf8d9b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:14:19,993 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 15be0fc643a8e496cf59a78a3bbf8d9b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770244, jitterRate=-0.02058456838130951}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:14:19,993 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 15be0fc643a8e496cf59a78a3bbf8d9b: 2024-12-06T08:14:19,994 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b., pid=11, masterSystemTime=1733472859978 2024-12-06T08:14:19,996 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:14:19,996 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 
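The repeating `RecoverLeaseFSUtils(258): Failed invocation ... Filesystem closed` warnings interleaved with the region open above appear to come from an old WAL writer being closed against a DFS client that has already shut down: lease recovery asks the NameNode to recover the lease on the WAL file and then polls `isFileClosed()` roughly once a second until it succeeds or gives up. The sketch below shows that recover-then-poll pattern; it is an assumed stand-alone helper, not the HBase `RecoverLeaseFSUtils` implementation, and it uses only public HDFS client calls (`DistributedFileSystem.recoverLease`, and the `isFileClosed` call visible in the stack traces).

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {

  /**
   * Hypothetical helper: ask the NameNode to recover the lease on a WAL file,
   * then poll isFileClosed() until the last block is finalized or the deadline
   * passes. If the DistributedFileSystem's underlying DFSClient has already
   * been closed, every call throws IOException("Filesystem closed") and the
   * loop simply retries, which is the pattern visible in the repeated WARNs.
   */
  public static boolean waitForLeaseRecovery(DistributedFileSystem fs, Path wal, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        // recoverLease() returns true if the file is already closed or recovery
        // completed immediately; otherwise keep polling isFileClosed().
        if (fs.recoverLease(wal) || fs.isFileClosed(wal)) {
          return true;
        }
      } catch (IOException e) {
        // e.g. "Filesystem closed": swallow and retry until the deadline expires.
      }
      Thread.sleep(1000L); // the log shows roughly one attempt per second
    }
    return false;
  }
}
```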
2024-12-06T08:14:19,997 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=15be0fc643a8e496cf59a78a3bbf8d9b, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,33733,1733472858308 2024-12-06T08:14:20,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-06T08:14:20,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 15be0fc643a8e496cf59a78a3bbf8d9b, server=b6b797fc3981,33733,1733472858308 in 174 msec 2024-12-06T08:14:20,003 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T08:14:20,003 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=15be0fc643a8e496cf59a78a3bbf8d9b, ASSIGN in 330 msec 2024-12-06T08:14:20,004 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:14:20,004 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472860004"}]},"ts":"1733472860004"} 2024-12-06T08:14:20,006 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-06T08:14:20,008 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:14:20,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 366 msec 2024-12-06T08:14:20,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:21,274 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-06T08:14:21,274 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-06T08:14:21,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:14:22,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:23,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:24,561 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T08:14:24,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:24,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:24,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:24,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:24,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:24,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:24,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:24,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:14:24,597 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-06T08:14:24,597 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-06T08:14:24,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:25,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:26,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:27,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:28,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:14:29,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35039 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:14:29,647 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart, procId: 9 completed 2024-12-06T08:14:29,649 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-06T08:14:29,649 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:14:29,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:30,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:31,656 INFO [Time-limited test {}] wal.TestLogRolling(337): log.getCurrentFileName()): hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 2024-12-06T08:14:31,656 WARN [ResponseProcessor for block BP-775885647-172.17.0.2-1733472857543:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-775885647-172.17.0.2-1733472857543:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:31,656 WARN [ResponseProcessor for block BP-775885647-172.17.0.2-1733472857543:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-775885647-172.17.0.2-1733472857543:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-775885647-172.17.0.2-1733472857543:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:34387,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:31,656 WARN [ResponseProcessor for block BP-775885647-172.17.0.2-1733472857543:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-775885647-172.17.0.2-1733472857543:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:31,657 WARN [DataStreamer for file /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 block BP-775885647-172.17.0.2-1733472857543:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-775885647-172.17.0.2-1733472857543:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34387,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK], DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34387,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK]) is bad. 2024-12-06T08:14:31,657 WARN [DataStreamer for file /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/WALs/b6b797fc3981,35039,1733472858248/b6b797fc3981%2C35039%2C1733472858248.1733472858386 block BP-775885647-172.17.0.2-1733472857543:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-775885647-172.17.0.2-1733472857543:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34387,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK], DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34387,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK]) is bad. 2024-12-06T08:14:31,657 WARN [DataStreamer for file /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.meta.1733472859052.meta block BP-775885647-172.17.0.2-1733472857543:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-775885647-172.17.0.2-1733472857543:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK], DatanodeInfoWithStorage[127.0.0.1:34387,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34387,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK]) is bad. 2024-12-06T08:14:31,657 WARN [PacketResponder: BP-775885647-172.17.0.2-1733472857543:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34387] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] 
at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:31,657 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1785382874_22 at /127.0.0.1:43752 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34387:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43752 dst: /127.0.0.1:34387 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:31,658 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-233802285_22 at /127.0.0.1:43710 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34387:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43710 dst: /127.0.0.1:34387 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:31,658 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1785382874_22 at /127.0.0.1:56582 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35661:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56582 dst: /127.0.0.1:35661 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:31,658 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-233802285_22 at /127.0.0.1:56558 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35661:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56558 dst: /127.0.0.1:35661 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:31,658 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1785382874_22 at /127.0.0.1:43754 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34387:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43754 dst: /127.0.0.1:34387 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:31,658 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1785382874_22 at /127.0.0.1:56584 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35661:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56584 dst: /127.0.0.1:35661 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:14:31,659 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d898677{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:14:31,660 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51324347{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:14:31,660 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:14:31,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3800cd12{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:14:31,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64b5487{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,STOPPED} 2024-12-06T08:14:31,662 WARN [BP-775885647-172.17.0.2-1733472857543 heartbeating to localhost/127.0.0.1:38689 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:14:31,662 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T08:14:31,662 WARN [BP-775885647-172.17.0.2-1733472857543 heartbeating to localhost/127.0.0.1:38689 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-775885647-172.17.0.2-1733472857543 (Datanode Uuid fbbf122c-11f0-4f1e-89c7-afcec1c3fd0e) service to localhost/127.0.0.1:38689 2024-12-06T08:14:31,662 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:14:31,663 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data4/current/BP-775885647-172.17.0.2-1733472857543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:14:31,663 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:14:31,663 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data3/current/BP-775885647-172.17.0.2-1733472857543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:14:31,675 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:14:31,678 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:14:31,679 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:14:31,679 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:14:31,679 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:14:31,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e636599{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:14:31,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fb87889{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:14:31,795 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77d908ac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/java.io.tmpdir/jetty-localhost-34429-hadoop-hdfs-3_4_1-tests_jar-_-any-15202833964564238037/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:14:31,795 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@784500cc{HTTP/1.1, (http/1.1)}{localhost:34429} 2024-12-06T08:14:31,795 INFO [Time-limited test {}] server.Server(415): Started @186178ms 2024-12-06T08:14:31,797 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:14:31,813 WARN [ResponseProcessor for block BP-775885647-172.17.0.2-1733472857543:blk_1073741833_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-775885647-172.17.0.2-1733472857543:blk_1073741833_1017 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:31,813 WARN [ResponseProcessor for block BP-775885647-172.17.0.2-1733472857543:blk_1073741830_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-775885647-172.17.0.2-1733472857543:blk_1073741830_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:31,813 WARN [ResponseProcessor for block BP-775885647-172.17.0.2-1733472857543:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-775885647-172.17.0.2-1733472857543:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:31,814 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1785382874_22 at /127.0.0.1:34466 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35661:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34466 dst: /127.0.0.1:35661 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:14:31,814 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1785382874_22 at /127.0.0.1:34476 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35661:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34476 dst: /127.0.0.1:35661 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:31,815 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-233802285_22 at /127.0.0.1:34474 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35661:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34474 dst: /127.0.0.1:35661 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:31,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:31,818 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@31fce438{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:14:31,818 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c2473d5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:14:31,818 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:14:31,818 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e67e938{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:14:31,819 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63044f0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,STOPPED} 2024-12-06T08:14:31,820 WARN [BP-775885647-172.17.0.2-1733472857543 heartbeating to localhost/127.0.0.1:38689 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:14:31,820 WARN [BP-775885647-172.17.0.2-1733472857543 heartbeating to localhost/127.0.0.1:38689 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-775885647-172.17.0.2-1733472857543 (Datanode Uuid 347734b2-08ee-4766-a60a-25af9c6395cb) service to localhost/127.0.0.1:38689 2024-12-06T08:14:31,820 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T08:14:31,820 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:14:31,820 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data1/current/BP-775885647-172.17.0.2-1733472857543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:14:31,821 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data2/current/BP-775885647-172.17.0.2-1733472857543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:14:31,821 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:14:31,831 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:14:31,834 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:14:31,835 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:14:31,835 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:14:31,835 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:14:31,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@128db53e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:14:31,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d115d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:14:31,895 WARN [Thread-1084 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:14:31,897 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd99ea5e4f32923b with lease ID 0x98969c9208cf92c5: from storage DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad node DatanodeRegistration(127.0.0.1:43545, datanodeUuid=fbbf122c-11f0-4f1e-89c7-afcec1c3fd0e, infoPort=41857, infoSecurePort=0, ipcPort=44689, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:14:31,898 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd99ea5e4f32923b with lease ID 0x98969c9208cf92c5: from storage DS-e1b9cf0a-2e67-40bc-a296-4e5b76d1358a node DatanodeRegistration(127.0.0.1:43545, datanodeUuid=fbbf122c-11f0-4f1e-89c7-afcec1c3fd0e, infoPort=41857, infoSecurePort=0, ipcPort=44689, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:14:31,952 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@73f5eff5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/java.io.tmpdir/jetty-localhost-43519-hadoop-hdfs-3_4_1-tests_jar-_-any-12321467471144347041/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:14:31,953 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5aaad42a{HTTP/1.1, (http/1.1)}{localhost:43519} 2024-12-06T08:14:31,953 INFO [Time-limited test {}] server.Server(415): Started @186336ms 2024-12-06T08:14:31,954 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-06T08:14:32,034 WARN [Thread-1115 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:14:32,036 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8a57096e6ffde76 with lease ID 0x98969c9208cf92c6: from storage DS-105e7187-8301-43c9-93e5-849e3a6c177a node DatanodeRegistration(127.0.0.1:34819, datanodeUuid=347734b2-08ee-4766-a60a-25af9c6395cb, infoPort=35009, infoSecurePort=0, ipcPort=43377, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:14:32,036 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8a57096e6ffde76 with lease ID 0x98969c9208cf92c6: from storage DS-e5813ee0-50f6-405a-94c6-23559b5d6037 node DatanodeRegistration(127.0.0.1:34819, datanodeUuid=347734b2-08ee-4766-a60a-25af9c6395cb, infoPort=35009, infoSecurePort=0, ipcPort=43377, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:14:32,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:14:32,972 INFO [Time-limited test {}] wal.TestLogRolling(349): Data Nodes restarted 2024-12-06T08:14:32,974 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-06T08:14:32,975 WARN [RS:0;b6b797fc3981:33733.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=5, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:32,975 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C33733%2C1733472858308:(num 1733472858691) roll requested 2024-12-06T08:14:32,975 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C33733%2C1733472858308.1733472872975 2024-12-06T08:14:32,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33733 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:14:32,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33733 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37758 deadline: 1733472882974, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL 2024-12-06T08:14:32,981 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 newFile=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 2024-12-06T08:14:32,981 WARN [regionserver/b6b797fc3981:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=5, requesting roll of WAL 2024-12-06T08:14:32,981 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 with entries=5, filesize=2.09 KB; new WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 2024-12-06T08:14:32,982 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35009:35009),(127.0.0.1/127.0.0.1:41857:41857)] 2024-12-06T08:14:32,982 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 is not closed yet, will try archiving it next time 2024-12-06T08:14:32,982 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:32,982 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:32,982 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 2024-12-06T08:14:32,982 WARN [IPC Server handler 0 on default port 38689 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1017 2024-12-06T08:14:32,983 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 after 1ms 2024-12-06T08:14:33,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:14:34,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:35,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:35,897 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1017: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-06T08:14:36,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:36,983 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 after 4001ms 2024-12-06T08:14:37,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:14:38,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:39,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:40,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:41,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:42,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:43,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:44,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:45,017 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-06T08:14:45,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:46,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:47,019 WARN [ResponseProcessor for block BP-775885647-172.17.0.2-1733472857543:blk_1073741839_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-775885647-172.17.0.2-1733472857543:blk_1073741839_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:14:47,019 WARN [DataStreamer for file /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 block BP-775885647-172.17.0.2-1733472857543:blk_1073741839_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-775885647-172.17.0.2-1733472857543:blk_1073741839_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34819,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK], DatanodeInfoWithStorage[127.0.0.1:43545,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34819,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]) is bad. 2024-12-06T08:14:47,020 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1785382874_22 at /127.0.0.1:37026 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:34819:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37026 dst: /127.0.0.1:34819 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:14:47,020 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1785382874_22 at /127.0.0.1:47292 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:43545:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47292 dst: /127.0.0.1:43545 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:14:47,022 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@73f5eff5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:14:47,022 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5aaad42a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:14:47,023 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:14:47,023 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d115d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:14:47,023 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@128db53e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,STOPPED} 2024-12-06T08:14:47,025 WARN [BP-775885647-172.17.0.2-1733472857543 heartbeating to localhost/127.0.0.1:38689 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:14:47,025 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:14:47,025 WARN [BP-775885647-172.17.0.2-1733472857543 heartbeating to localhost/127.0.0.1:38689 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-775885647-172.17.0.2-1733472857543 (Datanode Uuid 347734b2-08ee-4766-a60a-25af9c6395cb) service to localhost/127.0.0.1:38689 2024-12-06T08:14:47,025 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:14:47,026 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data1/current/BP-775885647-172.17.0.2-1733472857543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:14:47,026 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data2/current/BP-775885647-172.17.0.2-1733472857543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:14:47,026 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:14:47,043 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:14:47,046 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:14:47,047 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:14:47,047 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:14:47,047 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:14:47,048 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@631e3561{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:14:47,048 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a7daec6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:14:47,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@497d199b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/java.io.tmpdir/jetty-localhost-33699-hadoop-hdfs-3_4_1-tests_jar-_-any-13008972376227841163/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:14:47,174 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@738d46da{HTTP/1.1, 
(http/1.1)}{localhost:33699} 2024-12-06T08:14:47,174 INFO [Time-limited test {}] server.Server(415): Started @201557ms 2024-12-06T08:14:47,176 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:14:47,193 WARN [ResponseProcessor for block BP-775885647-172.17.0.2-1733472857543:blk_1073741839_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-775885647-172.17.0.2-1733472857543:blk_1073741839_1020 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:47,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1785382874_22 at /127.0.0.1:48870 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741839_1018] {}] datanode.DataXceiver(331): 127.0.0.1:43545:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48870 dst: /127.0.0.1:43545 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:14:47,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77d908ac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:14:47,200 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@784500cc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:14:47,200 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:14:47,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fb87889{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:14:47,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e636599{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,STOPPED} 2024-12-06T08:14:47,202 WARN [BP-775885647-172.17.0.2-1733472857543 heartbeating to localhost/127.0.0.1:38689 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:14:47,202 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-06T08:14:47,202 WARN [BP-775885647-172.17.0.2-1733472857543 heartbeating to localhost/127.0.0.1:38689 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-775885647-172.17.0.2-1733472857543 (Datanode Uuid fbbf122c-11f0-4f1e-89c7-afcec1c3fd0e) service to localhost/127.0.0.1:38689 2024-12-06T08:14:47,202 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:14:47,203 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data3/current/BP-775885647-172.17.0.2-1733472857543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:14:47,203 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data4/current/BP-775885647-172.17.0.2-1733472857543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:14:47,203 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:14:47,212 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:14:47,215 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:14:47,215 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:14:47,215 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:14:47,215 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:14:47,216 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@694271f6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:14:47,216 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f4fdc7d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:14:47,275 WARN [Thread-1159 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:14:47,277 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa2bc93a45098cd4b with lease ID 0x98969c9208cf92c7: from storage DS-105e7187-8301-43c9-93e5-849e3a6c177a node DatanodeRegistration(127.0.0.1:37227, datanodeUuid=347734b2-08ee-4766-a60a-25af9c6395cb, infoPort=39467, infoSecurePort=0, ipcPort=42117, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:14:47,278 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa2bc93a45098cd4b with lease ID 0x98969c9208cf92c7: from storage DS-e5813ee0-50f6-405a-94c6-23559b5d6037 node DatanodeRegistration(127.0.0.1:37227, datanodeUuid=347734b2-08ee-4766-a60a-25af9c6395cb, infoPort=39467, infoSecurePort=0, ipcPort=42117, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543), blocks: 8, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T08:14:47,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@168f7695{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/java.io.tmpdir/jetty-localhost-44391-hadoop-hdfs-3_4_1-tests_jar-_-any-8904667654475844516/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:14:47,333 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d5f9043{HTTP/1.1, (http/1.1)}{localhost:44391} 2024-12-06T08:14:47,333 INFO [Time-limited test {}] server.Server(415): Started @201716ms 2024-12-06T08:14:47,334 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-06T08:14:47,440 WARN [Thread-1190 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:14:47,443 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x98b0280a92cc0def with lease ID 0x98969c9208cf92c8: from storage DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad node DatanodeRegistration(127.0.0.1:44837, datanodeUuid=fbbf122c-11f0-4f1e-89c7-afcec1c3fd0e, infoPort=45799, infoSecurePort=0, ipcPort=38765, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T08:14:47,443 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x98b0280a92cc0def with lease ID 0x98969c9208cf92c8: from storage DS-e1b9cf0a-2e67-40bc-a296-4e5b76d1358a node DatanodeRegistration(127.0.0.1:44837, datanodeUuid=fbbf122c-11f0-4f1e-89c7-afcec1c3fd0e, infoPort=45799, infoSecurePort=0, ipcPort=38765, storageInfo=lv=-57;cid=testClusterID;nsid=2104124353;c=1733472857543), blocks: 8, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:14:47,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:14:48,231 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:14:48,352 INFO [Time-limited test {}] wal.TestLogRolling(366): Data Nodes restarted 2024-12-06T08:14:48,354 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-06T08:14:48,355 WARN [RS:0;b6b797fc3981:33733.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=8, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43545,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:48,355 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C33733%2C1733472858308:(num 1733472872975) roll requested 2024-12-06T08:14:48,355 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C33733%2C1733472858308.1733472888355 2024-12-06T08:14:48,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33733 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43545,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:14:48,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33733 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:37758 deadline: 1733472898354, exception=org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL 2024-12-06T08:14:48,363 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 newFile=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472888355 2024-12-06T08:14:48,363 WARN [regionserver/b6b797fc3981:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=8, requesting roll of WAL 2024-12-06T08:14:48,363 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472888355 2024-12-06T08:14:48,363 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39467:39467),(127.0.0.1/127.0.0.1:45799:45799)] 2024-12-06T08:14:48,363 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 is not closed yet, will try archiving it next time 2024-12-06T08:14:48,363 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43545,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:48,363 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43545,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:48,363 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 2024-12-06T08:14:48,364 WARN [IPC Server handler 1 on default port 38689 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1020 2024-12-06T08:14:48,364 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 after 1ms 2024-12-06T08:14:48,449 WARN [master/b6b797fc3981:0:becomeActiveMaster.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=95, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:48,450 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C35039%2C1733472858248:(num 1733472858386) roll requested 2024-12-06T08:14:48,450 ERROR [ProcExecTimeout {}] region.RegionProcedureStore(422): Failed to delete pids=[4, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:48,450 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C35039%2C1733472858248.1733472888450 2024-12-06T08:14:48,450 ERROR [ProcExecTimeout {}] procedure2.TimeoutExecutorThread(124): Ignoring pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner exception: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL java.io.UncheckedIOException: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore.delete(RegionProcedureStore.java:423) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner.periodicExecute(CompletedProcedureCleaner.java:135) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.executeInMemoryChore(TimeoutExecutorThread.java:122) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.execDelayedProcedure(TimeoutExecutorThread.java:101) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:68) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] Caused by: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:14:48,456 WARN [master:store-WAL-Roller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=95, requesting roll of WAL 2024-12-06T08:14:48,456 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/WALs/b6b797fc3981,35039,1733472858248/b6b797fc3981%2C35039%2C1733472858248.1733472858386 with entries=92, filesize=45.99 KB; new WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/WALs/b6b797fc3981,35039,1733472858248/b6b797fc3981%2C35039%2C1733472858248.1733472888450 2024-12-06T08:14:48,456 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45799:45799),(127.0.0.1/127.0.0.1:39467:39467)] 2024-12-06T08:14:48,456 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(751): hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/WALs/b6b797fc3981,35039,1733472858248/b6b797fc3981%2C35039%2C1733472858248.1733472858386 is not closed yet, will try archiving it next time 2024-12-06T08:14:48,457 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:48,457 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:14:48,457 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/WALs/b6b797fc3981,35039,1733472858248/b6b797fc3981%2C35039%2C1733472858248.1733472858386 2024-12-06T08:14:48,457 WARN [IPC Server handler 3 on default port 38689 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/WALs/b6b797fc3981,35039,1733472858248/b6b797fc3981%2C35039%2C1733472858248.1733472858386 has not been closed. 
Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741830_1016 2024-12-06T08:14:48,458 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/WALs/b6b797fc3981,35039,1733472858248/b6b797fc3981%2C35039%2C1733472858248.1733472858386 after 1ms 2024-12-06T08:14:48,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:49,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:50,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:14:51,278 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741839_1020: GenerationStamp not matched, existing replica is blk_1073741839_1018 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-06T08:14:51,542 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:14:51,543 INFO [RS-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36232, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:14:51,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:52,365 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 after 4002ms 2024-12-06T08:14:52,458 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/WALs/b6b797fc3981,35039,1733472858248/b6b797fc3981%2C35039%2C1733472858248.1733472858386 after 4001ms 2024-12-06T08:14:52,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:14:53,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:54,442 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1016: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-06T08:14:54,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:55,712 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1fe245b8d8375ab9b801a570a1b2b5e8, had cached 0 bytes from a total of 23930 2024-12-06T08:14:55,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:56,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:14:57,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:58,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:14:59,557 INFO [master/b6b797fc3981:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T08:14:59,557 INFO [master/b6b797fc3981:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-06T08:14:59,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:00,406 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C33733%2C1733472858308.1733472900406 2024-12-06T08:15:00,413 DEBUG [Time-limited test {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472888355 newFile=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 2024-12-06T08:15:00,414 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472888355 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 2024-12-06T08:15:00,414 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45799:45799),(127.0.0.1/127.0.0.1:39467:39467)] 2024-12-06T08:15:00,414 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472888355 is not closed yet, will try archiving it next time 2024-12-06T08:15:00,414 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 2024-12-06T08:15:00,414 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 2024-12-06T08:15:00,415 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 after 1ms 2024-12-06T08:15:00,415 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 2024-12-06T08:15:00,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44837 is added to blk_1073741840_1021 (size=1264) 2024-12-06T08:15:00,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37227 is added to blk_1073741840_1021 (size=1264) 2024-12-06T08:15:00,422 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733472859447/Put/vlen=162/seqid=0] 2024-12-06T08:15:00,422 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [default/info:d/1733472859510/Put/vlen=9/seqid=0] 2024-12-06T08:15:00,422 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #5: [hbase/info:d/1733472859531/Put/vlen=7/seqid=0] 2024-12-06T08:15:00,422 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #3: 
[\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733472859993/Put/vlen=218/seqid=0] 2024-12-06T08:15:00,422 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #4: [row1002/info:/1733472869653/Put/vlen=1045/seqid=0] 2024-12-06T08:15:00,422 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472858691 2024-12-06T08:15:00,422 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 2024-12-06T08:15:00,422 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 2024-12-06T08:15:00,423 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 after 1ms 2024-12-06T08:15:00,423 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 2024-12-06T08:15:00,426 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #6: [row1003/info:/1733472883014/Put/vlen=1045/seqid=0] 2024-12-06T08:15:00,426 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #7: [row1004/info:/1733472885017/Put/vlen=1045/seqid=0] 2024-12-06T08:15:00,426 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472872975 2024-12-06T08:15:00,426 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472888355 2024-12-06T08:15:00,426 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472888355 2024-12-06T08:15:00,426 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472888355 after 0ms 2024-12-06T08:15:00,426 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472888355 2024-12-06T08:15:00,430 DEBUG [Time-limited test {}] wal.TestLogRolling(389): #9: [row1005/info:/1733472898405/Put/vlen=1045/seqid=0] 2024-12-06T08:15:00,430 DEBUG [Time-limited test {}] wal.TestLogRolling(380): recovering lease for 
hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 2024-12-06T08:15:00,430 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 2024-12-06T08:15:00,430 WARN [IPC Server handler 0 on default port 38689 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741842_1025 2024-12-06T08:15:00,431 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 after 1ms 2024-12-06T08:15:00,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:01,281 WARN [ResponseProcessor for block BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:15:01,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-233802285_22 at /127.0.0.1:57486 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:44837:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57486 dst: /127.0.0.1:44837 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44837 remote=/127.0.0.1:57486]. Total timeout mills is 60000, 59132 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-06T08:15:01,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-233802285_22 at /127.0.0.1:45644 [Receiving block BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:37227:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45644 dst: /127.0.0.1:37227 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:15:01,281 WARN [DataStreamer for file /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 block BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44837,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK], DatanodeInfoWithStorage[127.0.0.1:37227,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44837,DS-c077aa81-bbd4-473d-8bc7-d2a64e5464ad,DISK]) is bad. 
2024-12-06T08:15:01,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44837 is added to blk_1073741842_1026 (size=85) 2024-12-06T08:15:01,286 WARN [DataStreamer for file /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 block BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:15:01,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37227 is added to blk_1073741842_1026 (size=85) 2024-12-06T08:15:01,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:02,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:03,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:04,431 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 after 4001ms 2024-12-06T08:15:04,431 DEBUG [Time-limited test {}] wal.TestLogRolling(384): Reading WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 2024-12-06T08:15:04,436 DEBUG [Time-limited test {}] wal.TestLogRolling(396): EOF reading file /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 2024-12-06T08:15:04,436 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 51cfbf71f3654e0c90300f77ec1fcadd 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T08:15:04,437 WARN [RS:0;b6b797fc3981:33733.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=7, requesting roll of WAL org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:15:04,437 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C33733%2C1733472858308:(num 1733472900406) roll requested 2024-12-06T08:15:04,437 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 51cfbf71f3654e0c90300f77ec1fcadd: 2024-12-06T08:15:04,437 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C33733%2C1733472858308.1733472904437 2024-12-06T08:15:04,437 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:15:04,438 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.42 KB 2024-12-06T08:15:04,438 WARN [RS_OPEN_META-regionserver/b6b797fc3981:0-0.append-pool-0 {}] wal.FSHLog$RingBufferEventHandler(1189): Append sequenceId=15, requesting roll of WAL java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:15:04,438 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-06T08:15:04,438 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:15:04,439 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 15be0fc643a8e496cf59a78a3bbf8d9b 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-06T08:15:04,439 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 15be0fc643a8e496cf59a78a3bbf8d9b: 2024-12-06T08:15:04,439 INFO [Time-limited test {}] wal.TestLogRolling(416): org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:15:04,442 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster
2024-12-06T08:15:04,442 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-06T08:15:04,442 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x227fb64a to 127.0.0.1:53536
2024-12-06T08:15:04,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-06T08:15:04,442 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-06T08:15:04,443 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1591161763, stopped=false
2024-12-06T08:15:04,443 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b6b797fc3981,35039,1733472858248
2024-12-06T08:15:04,444 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.TestLogRolling$2(324): preLogRoll: oldFile=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 newFile=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472904437
2024-12-06T08:15:04,444 WARN [regionserver/b6b797fc3981:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WAL
org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=7, requesting roll of WAL
2024-12-06T08:15:04,444 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472904437
2024-12-06T08:15:04,445 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39467:39467),(127.0.0.1/127.0.0.1:45799:45799)]
2024-12-06T08:15:04,445 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 is not closed yet, will try archiving it next time
2024-12-06T08:15:04,445 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(197): WAL FSHLog b6b797fc3981%2C33733%2C1733472858308.meta:.meta(num 1733472859052) roll requested
2024-12-06T08:15:04,445 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:15:04,445 INFO [regionserver/b6b797fc3981:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C33733%2C1733472858308.meta.1733472904445.meta 2024-12-06T08:15:04,445 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-775885647-172.17.0.2-1733472857543:blk_1073741842_1025 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:15:04,445 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 2024-12-06T08:15:04,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:15:04,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:04,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:15:04,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:04,446 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T08:15:04,446 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:15:04,446 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:15:04,446 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,33733,1733472858308' ***** 2024-12-06T08:15:04,446 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T08:15:04,446 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:15:04,446 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 after 1ms 2024-12-06T08:15:04,448 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:15:04,448 INFO [RS:0;b6b797fc3981:33733 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:15:04,448 INFO [RS:0;b6b797fc3981:33733 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-06T08:15:04,448 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T08:15:04,448 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(3579): Received CLOSE for 51cfbf71f3654e0c90300f77ec1fcadd 2024-12-06T08:15:04,449 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.1733472900406 to hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/oldWALs/b6b797fc3981%2C33733%2C1733472858308.1733472900406 2024-12-06T08:15:04,450 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(3579): Received CLOSE for 15be0fc643a8e496cf59a78a3bbf8d9b 2024-12-06T08:15:04,450 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,33733,1733472858308 2024-12-06T08:15:04,450 DEBUG [RS:0;b6b797fc3981:33733 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:15:04,450 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 51cfbf71f3654e0c90300f77ec1fcadd, disabling compactions & flushes 2024-12-06T08:15:04,450 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:15:04,450 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. 2024-12-06T08:15:04,450 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:15:04,450 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T08:15:04,450 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. 2024-12-06T08:15:04,450 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T08:15:04,450 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. after waiting 0 ms 2024-12-06T08:15:04,450 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. 2024-12-06T08:15:04,450 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 51cfbf71f3654e0c90300f77ec1fcadd 1/1 column families, dataSize=78 B heapSize=728 B 2024-12-06T08:15:04,450 WARN [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 
2024-12-06T08:15:04,450 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-06T08:15:04,451 DEBUG [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1603): Online Regions={51cfbf71f3654e0c90300f77ec1fcadd=hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd., 1588230740=hbase:meta,,1.1588230740, 15be0fc643a8e496cf59a78a3bbf8d9b=TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b.} 2024-12-06T08:15:04,451 DEBUG [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 15be0fc643a8e496cf59a78a3bbf8d9b, 51cfbf71f3654e0c90300f77ec1fcadd 2024-12-06T08:15:04,451 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:15:04,451 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:15:04,451 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:15:04,451 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:15:04,451 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:15:04,451 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.90 KB heapSize=5.89 KB 2024-12-06T08:15:04,451 WARN [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-06T08:15:04,451 WARN [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultMemStore(92): Snapshot called again without clearing previous. Doing nothing. Another ongoing flush or did we fail last attempt? 2024-12-06T08:15:04,451 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:15:04,452 ERROR [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2808): ***** ABORTING region server b6b797fc3981,33733,1733472858308: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) ~[classes/:?] at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) ~[disruptor-3.4.4.jar:?] at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) ~[disruptor-3.4.4.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:15:04,452 ERROR [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2815): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-12-06T08:15:04,452 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-12-06T08:15:04,453 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-12-06T08:15:04,453 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-12-06T08:15:04,453 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-12-06T08:15:04,453 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2819): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 674985952 }, "NonHeapMemoryUsage": { "committed": 169869312, "init": 7667712, "max": -1, "used": 167905064 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-12-06T08:15:04,453 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35039 {}] master.MasterRpcServices(626): b6b797fc3981,33733,1733472858308 reported a fatal error: ***** ABORTING region server b6b797fc3981,33733,1733472858308: Unrecoverable exception while closing hbase:meta,,1.1588230740 ***** Cause: org.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.append(FSHLog.java:1191) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:1064) at org.apache.hadoop.hbase.regionserver.wal.FSHLog$RingBufferEventHandler.onEvent(FSHLog.java:967) at com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:168) at com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base/java.lang.Thread.run(Thread.java:840) Caused by: java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) 2024-12-06T08:15:04,459 WARN [regionserver/b6b797fc3981:0.logRoller {}] wal.FSHLog(373): Failed sync-before-close but no outstanding appends; closing WALorg.apache.hadoop.hbase.regionserver.wal.DamagedWALException: Append sequenceId=15, requesting roll of WAL 2024-12-06T08:15:04,460 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.meta.1733472859052.meta with entries=11, filesize=3.66 KB; new WAL /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.meta.1733472904445.meta 2024-12-06T08:15:04,462 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45799:45799),(127.0.0.1/127.0.0.1:39467:39467)] 2024-12-06T08:15:04,462 DEBUG [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractFSWAL(751): hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.meta.1733472859052.meta is not closed yet, will try archiving it next time 2024-12-06T08:15:04,462 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-06T08:15:04,462 WARN [Close-WAL-Writer-0 {}] wal.FSHLog(462): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35661,DS-105e7187-8301-43c9-93e5-849e3a6c177a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-06T08:15:04,463 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.meta.1733472859052.meta 2024-12-06T08:15:04,463 WARN [IPC Server handler 0 on default port 38689 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.meta.1733472859052.meta has not been closed. Lease recovery is in progress. RecoveryId = 1029 for block blk_1073741834_1015 2024-12-06T08:15:04,463 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.meta.1733472859052.meta after 0ms 2024-12-06T08:15:04,473 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/namespace/51cfbf71f3654e0c90300f77ec1fcadd/.tmp/info/298cb014b1dc44abb8b70b3429b3dfb1 is 45, key is default/info:d/1733472859510/Put/seqid=0 2024-12-06T08:15:04,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44837 is added to blk_1073741845_1030 (size=5037) 2024-12-06T08:15:04,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37227 is added to blk_1073741845_1030 (size=5037) 2024-12-06T08:15:04,479 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/namespace/51cfbf71f3654e0c90300f77ec1fcadd/.tmp/info/298cb014b1dc44abb8b70b3429b3dfb1 2024-12-06T08:15:04,486 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/namespace/51cfbf71f3654e0c90300f77ec1fcadd/.tmp/info/298cb014b1dc44abb8b70b3429b3dfb1 as hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/namespace/51cfbf71f3654e0c90300f77ec1fcadd/info/298cb014b1dc44abb8b70b3429b3dfb1 2024-12-06T08:15:04,491 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/namespace/51cfbf71f3654e0c90300f77ec1fcadd/info/298cb014b1dc44abb8b70b3429b3dfb1, entries=2, sequenceid=8, filesize=4.9 K 2024-12-06T08:15:04,492 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 51cfbf71f3654e0c90300f77ec1fcadd in 42ms, sequenceid=8, compaction requested=false 2024-12-06T08:15:04,495 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/data/hbase/namespace/51cfbf71f3654e0c90300f77ec1fcadd/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-06T08:15:04,496 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. 2024-12-06T08:15:04,496 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 51cfbf71f3654e0c90300f77ec1fcadd: 2024-12-06T08:15:04,496 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733472859096.51cfbf71f3654e0c90300f77ec1fcadd. 2024-12-06T08:15:04,496 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 15be0fc643a8e496cf59a78a3bbf8d9b, disabling compactions & flushes 2024-12-06T08:15:04,496 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:15:04,496 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:15:04,496 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. after waiting 0 ms 2024-12-06T08:15:04,496 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:15:04,496 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 15be0fc643a8e496cf59a78a3bbf8d9b: 2024-12-06T08:15:04,497 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 
2024-12-06T08:15:04,559 INFO [regionserver/b6b797fc3981:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:15:04,561 INFO [regionserver/b6b797fc3981:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-06T08:15:04,561 INFO [regionserver/b6b797fc3981:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-06T08:15:04,651 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T08:15:04,651 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(3579): Received CLOSE for 15be0fc643a8e496cf59a78a3bbf8d9b 2024-12-06T08:15:04,651 DEBUG [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 15be0fc643a8e496cf59a78a3bbf8d9b 2024-12-06T08:15:04,651 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 15be0fc643a8e496cf59a78a3bbf8d9b, disabling compactions & flushes 2024-12-06T08:15:04,651 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:15:04,651 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:15:04,651 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:15:04,651 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:15:04,651 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:15:04,651 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. after waiting 0 ms 2024-12-06T08:15:04,651 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:15:04,651 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:15:04,651 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:15:04,651 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 15be0fc643a8e496cf59a78a3bbf8d9b: 2024-12-06T08:15:04,651 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:15:04,652 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2803): Abort already in progress. 
Ignoring the current request with reason: Unrecoverable exception while closing TestLogRolling-testLogRollOnPipelineRestart,,1733472859642.15be0fc643a8e496cf59a78a3bbf8d9b. 2024-12-06T08:15:04,652 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionServer(2803): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:meta,,1.1588230740 2024-12-06T08:15:04,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:04,851 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1624): We were exiting though online regions are not empty, because some regions failed closing 2024-12-06T08:15:04,851 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,33733,1733472858308; all regions closed. 
2024-12-06T08:15:04,852 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308 2024-12-06T08:15:04,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37227 is added to blk_1073741844_1028 (size=93) 2024-12-06T08:15:04,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44837 is added to blk_1073741844_1028 (size=93) 2024-12-06T08:15:05,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:06,443 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-06T08:15:06,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:07,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:08,464 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308/b6b797fc3981%2C33733%2C1733472858308.meta.1733472859052.meta after 4001ms 2024-12-06T08:15:08,465 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/WALs/b6b797fc3981,33733,1733472858308 2024-12-06T08:15:08,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44837 is added to blk_1073741843_1027 (size=910) 2024-12-06T08:15:08,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37227 is added to blk_1073741843_1027 (size=910) 2024-12-06T08:15:08,467 DEBUG [RS:0;b6b797fc3981:33733 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:15:08,467 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:15:08,468 INFO [RS:0;b6b797fc3981:33733 {}] hbase.ChoreService(370): Chore service for: regionserver/b6b797fc3981:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T08:15:08,468 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T08:15:08,468 INFO [RS:0;b6b797fc3981:33733 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33733 2024-12-06T08:15:08,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:15:08,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b6b797fc3981,33733,1733472858308 2024-12-06T08:15:08,472 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b6b797fc3981,33733,1733472858308] 2024-12-06T08:15:08,472 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b6b797fc3981,33733,1733472858308; numProcessing=1 2024-12-06T08:15:08,474 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b6b797fc3981,33733,1733472858308 already deleted, retry=false 2024-12-06T08:15:08,474 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b6b797fc3981,33733,1733472858308 expired; onlineServers=0 2024-12-06T08:15:08,474 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,35039,1733472858248' ***** 2024-12-06T08:15:08,474 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T08:15:08,474 DEBUG [M:0;b6b797fc3981:35039 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d2b9a89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:15:08,474 INFO [M:0;b6b797fc3981:35039 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,35039,1733472858248 2024-12-06T08:15:08,474 INFO [M:0;b6b797fc3981:35039 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,35039,1733472858248; all regions closed. 2024-12-06T08:15:08,474 DEBUG [M:0;b6b797fc3981:35039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:15:08,474 DEBUG [M:0;b6b797fc3981:35039 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T08:15:08,474 DEBUG [M:0;b6b797fc3981:35039 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T08:15:08,474 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T08:15:08,474 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472858451 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472858451,5,FailOnTimeoutGroup] 2024-12-06T08:15:08,474 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472858451 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472858451,5,FailOnTimeoutGroup] 2024-12-06T08:15:08,475 INFO [M:0;b6b797fc3981:35039 {}] hbase.ChoreService(370): Chore service for: master/b6b797fc3981:0 had [] on shutdown 2024-12-06T08:15:08,475 DEBUG [M:0;b6b797fc3981:35039 {}] master.HMaster(1733): Stopping service threads 2024-12-06T08:15:08,475 INFO [M:0;b6b797fc3981:35039 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T08:15:08,475 INFO [M:0;b6b797fc3981:35039 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T08:15:08,475 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T08:15:08,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T08:15:08,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:08,476 DEBUG [M:0;b6b797fc3981:35039 {}] zookeeper.ZKUtil(347): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T08:15:08,476 WARN [M:0;b6b797fc3981:35039 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T08:15:08,476 INFO [M:0;b6b797fc3981:35039 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T08:15:08,476 INFO [M:0;b6b797fc3981:35039 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T08:15:08,476 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:15:08,476 DEBUG [M:0;b6b797fc3981:35039 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:15:08,476 INFO [M:0;b6b797fc3981:35039 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:15:08,476 DEBUG [M:0;b6b797fc3981:35039 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:15:08,476 DEBUG [M:0;b6b797fc3981:35039 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:15:08,476 DEBUG [M:0;b6b797fc3981:35039 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T08:15:08,476 INFO [M:0;b6b797fc3981:35039 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=40.09 KB heapSize=49.26 KB 2024-12-06T08:15:08,501 DEBUG [M:0;b6b797fc3981:35039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/77568c8919df4e4c97926471595b9e9f is 82, key is hbase:meta,,1/info:regioninfo/1733472859075/Put/seqid=0 2024-12-06T08:15:08,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37227 is added to blk_1073741846_1031 (size=5672) 2024-12-06T08:15:08,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44837 is added to blk_1073741846_1031 (size=5672) 2024-12-06T08:15:08,507 INFO [M:0;b6b797fc3981:35039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/77568c8919df4e4c97926471595b9e9f 2024-12-06T08:15:08,528 DEBUG [M:0;b6b797fc3981:35039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f1374097eaec4ffe94b29028e704145c is 778, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733472860009/Put/seqid=0 2024-12-06T08:15:08,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37227 is added to blk_1073741847_1032 (size=7469) 2024-12-06T08:15:08,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44837 is added to blk_1073741847_1032 (size=7469) 2024-12-06T08:15:08,534 INFO [M:0;b6b797fc3981:35039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=39.49 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f1374097eaec4ffe94b29028e704145c 2024-12-06T08:15:08,558 DEBUG [M:0;b6b797fc3981:35039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e14a6c4a2698414fb5669708f51ad652 is 69, key is b6b797fc3981,33733,1733472858308/rs:state/1733472858546/Put/seqid=0 2024-12-06T08:15:08,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44837 is added to blk_1073741848_1033 (size=5156) 2024-12-06T08:15:08,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37227 is added to blk_1073741848_1033 (size=5156) 2024-12-06T08:15:08,564 INFO [M:0;b6b797fc3981:35039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e14a6c4a2698414fb5669708f51ad652 2024-12-06T08:15:08,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:15:08,572 INFO [RS:0;b6b797fc3981:33733 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,33733,1733472858308; zookeeper connection closed. 2024-12-06T08:15:08,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33733-0x100666469600001, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:15:08,572 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5393d14a {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5393d14a 2024-12-06T08:15:08,573 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T08:15:08,592 DEBUG [M:0;b6b797fc3981:35039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/802b8db105324bff99f48aaf770659f3 is 52, key is load_balancer_on/state:d/1733472859636/Put/seqid=0 2024-12-06T08:15:08,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37227 is added to blk_1073741849_1034 (size=5056) 2024-12-06T08:15:08,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44837 is added to blk_1073741849_1034 (size=5056) 2024-12-06T08:15:08,598 INFO [M:0;b6b797fc3981:35039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/802b8db105324bff99f48aaf770659f3 2024-12-06T08:15:08,603 DEBUG [M:0;b6b797fc3981:35039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/77568c8919df4e4c97926471595b9e9f as hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/77568c8919df4e4c97926471595b9e9f 2024-12-06T08:15:08,609 INFO [M:0;b6b797fc3981:35039 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/77568c8919df4e4c97926471595b9e9f, entries=8, sequenceid=96, filesize=5.5 K 2024-12-06T08:15:08,609 DEBUG [M:0;b6b797fc3981:35039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f1374097eaec4ffe94b29028e704145c as hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f1374097eaec4ffe94b29028e704145c 2024-12-06T08:15:08,614 INFO [M:0;b6b797fc3981:35039 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f1374097eaec4ffe94b29028e704145c, 
entries=11, sequenceid=96, filesize=7.3 K 2024-12-06T08:15:08,615 DEBUG [M:0;b6b797fc3981:35039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e14a6c4a2698414fb5669708f51ad652 as hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e14a6c4a2698414fb5669708f51ad652 2024-12-06T08:15:08,620 INFO [M:0;b6b797fc3981:35039 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e14a6c4a2698414fb5669708f51ad652, entries=1, sequenceid=96, filesize=5.0 K 2024-12-06T08:15:08,621 DEBUG [M:0;b6b797fc3981:35039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/802b8db105324bff99f48aaf770659f3 as hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/802b8db105324bff99f48aaf770659f3 2024-12-06T08:15:08,626 INFO [M:0;b6b797fc3981:35039 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38689/user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/802b8db105324bff99f48aaf770659f3, entries=1, sequenceid=96, filesize=4.9 K 2024-12-06T08:15:08,627 INFO [M:0;b6b797fc3981:35039 {}] regionserver.HRegion(3040): Finished flush of dataSize ~40.09 KB/41052, heapSize ~49.20 KB/50376, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=96, compaction requested=false 2024-12-06T08:15:08,629 INFO [M:0;b6b797fc3981:35039 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:15:08,629 DEBUG [M:0;b6b797fc3981:35039 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:15:08,629 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/2195c4f3-59b9-4f4b-369f-0d982e0c2548/MasterData/WALs/b6b797fc3981,35039,1733472858248 2024-12-06T08:15:08,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37227 is added to blk_1073741841_1023 (size=757) 2024-12-06T08:15:08,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44837 is added to blk_1073741841_1023 (size=757) 2024-12-06T08:15:08,631 INFO [M:0;b6b797fc3981:35039 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-06T08:15:08,631 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T08:15:08,631 INFO [M:0;b6b797fc3981:35039 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35039 2024-12-06T08:15:08,634 DEBUG [M:0;b6b797fc3981:35039 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b6b797fc3981,35039,1733472858248 already deleted, retry=false 2024-12-06T08:15:08,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:15:08,736 INFO [M:0;b6b797fc3981:35039 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,35039,1733472858248; zookeeper connection closed. 2024-12-06T08:15:08,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35039-0x100666469600000, quorum=127.0.0.1:53536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:15:08,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@168f7695{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:15:08,739 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d5f9043{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:15:08,739 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:15:08,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f4fdc7d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:15:08,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@694271f6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,STOPPED} 2024-12-06T08:15:08,741 WARN [BP-775885647-172.17.0.2-1733472857543 heartbeating to localhost/127.0.0.1:38689 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:15:08,741 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:15:08,741 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:15:08,741 WARN [BP-775885647-172.17.0.2-1733472857543 heartbeating to localhost/127.0.0.1:38689 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-775885647-172.17.0.2-1733472857543 (Datanode Uuid fbbf122c-11f0-4f1e-89c7-afcec1c3fd0e) service to localhost/127.0.0.1:38689 2024-12-06T08:15:08,742 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data3/current/BP-775885647-172.17.0.2-1733472857543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:15:08,742 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data4/current/BP-775885647-172.17.0.2-1733472857543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:15:08,742 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:15:08,744 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@497d199b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:15:08,744 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@738d46da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:15:08,745 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:15:08,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a7daec6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:15:08,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@631e3561{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,STOPPED} 2024-12-06T08:15:08,746 WARN [BP-775885647-172.17.0.2-1733472857543 heartbeating to localhost/127.0.0.1:38689 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:15:08,746 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:15:08,747 WARN [BP-775885647-172.17.0.2-1733472857543 heartbeating to localhost/127.0.0.1:38689 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-775885647-172.17.0.2-1733472857543 (Datanode Uuid 347734b2-08ee-4766-a60a-25af9c6395cb) service to localhost/127.0.0.1:38689 2024-12-06T08:15:08,747 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:15:08,747 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data1/current/BP-775885647-172.17.0.2-1733472857543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:15:08,747 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/cluster_435b20a6-3fe4-680d-ac0c-e57152af9967/dfs/data/data2/current/BP-775885647-172.17.0.2-1733472857543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:15:08,747 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:15:08,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@340ac765{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:15:08,754 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39e2672a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:15:08,754 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:15:08,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@579b5c9d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:15:08,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@253120b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir/,STOPPED} 2024-12-06T08:15:08,761 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-06T08:15:08,777 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-06T08:15:08,785 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=100 (was 86) Potentially hanging thread: nioEventLoopGroup-27-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38689 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:38689 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:38689 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-29-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38689 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38689 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38689 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (869306163) connection to localhost/127.0.0.1:38689 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:38689 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-9-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-28-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-29-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-26-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=444 (was 428) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=112 (was 130), ProcessCount=11 (was 11), AvailableMemoryMB=8024 (was 8119) 2024-12-06T08:15:08,792 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=100, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=112, ProcessCount=11, AvailableMemoryMB=8024 2024-12-06T08:15:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T08:15:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.log.dir so I do NOT create it in target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213 2024-12-06T08:15:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/4ee72cfa-51f6-eb70-69f5-d9e9335d4df5/hadoop.tmp.dir so I do NOT create it in target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213 2024-12-06T08:15:08,792 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/cluster_163f29e7-d3e0-9889-5340-a29e52e8b1e7, deleteOnExit=true 2024-12-06T08:15:08,792 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T08:15:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/test.cache.data in system properties and HBase conf 2024-12-06T08:15:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T08:15:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/hadoop.log.dir in system properties and HBase conf 2024-12-06T08:15:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T08:15:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T08:15:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T08:15:08,793 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-06T08:15:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:15:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:15:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T08:15:08,793 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:15:08,794 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T08:15:08,794 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T08:15:08,794 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:15:08,794 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:15:08,794 INFO [Time-limited test {}] 
hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-06T08:15:08,794 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/nfs.dump.dir in system properties and HBase conf
2024-12-06T08:15:08,794 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/java.io.tmpdir in system properties and HBase conf
2024-12-06T08:15:08,794 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-06T08:15:08,794 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-06T08:15:08,794 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-06T08:15:08,807 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-06T08:15:08,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-06T08:15:08,886 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-06T08:15:08,891 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-06T08:15:08,892 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-06T08:15:08,892 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-06T08:15:08,892 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-06T08:15:08,894 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-06T08:15:08,894 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11f3953b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/hadoop.log.dir/,AVAILABLE}
2024-12-06T08:15:08,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8d103eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-06T08:15:09,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5657bbb8{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/java.io.tmpdir/jetty-localhost-36021-hadoop-hdfs-3_4_1-tests_jar-_-any-6067288611350078496/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-06T08:15:09,009 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@649a505c{HTTP/1.1, (http/1.1)}{localhost:36021}
2024-12-06T08:15:09,010 INFO [Time-limited test {}] server.Server(415): Started @223392ms
2024-12-06T08:15:09,023 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-06T08:15:09,093 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:15:09,096 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:15:09,096 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:15:09,096 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:15:09,096 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:15:09,097 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54f4c6b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:15:09,097 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fd1989a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:15:09,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@176cd05e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/java.io.tmpdir/jetty-localhost-40277-hadoop-hdfs-3_4_1-tests_jar-_-any-12850778623733100275/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:15:09,212 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@12a372f6{HTTP/1.1, (http/1.1)}{localhost:40277} 2024-12-06T08:15:09,212 INFO [Time-limited test {}] server.Server(415): Started @223595ms 2024-12-06T08:15:09,214 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:15:09,252 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:15:09,257 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:15:09,260 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:15:09,260 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:15:09,260 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:15:09,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b944f85{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:15:09,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58ac6791{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:15:09,315 WARN [Thread-1360 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/cluster_163f29e7-d3e0-9889-5340-a29e52e8b1e7/dfs/data/data1/current/BP-1195693227-172.17.0.2-1733472908825/current, will proceed with Du for space computation calculation, 2024-12-06T08:15:09,315 WARN [Thread-1361 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/cluster_163f29e7-d3e0-9889-5340-a29e52e8b1e7/dfs/data/data2/current/BP-1195693227-172.17.0.2-1733472908825/current, will proceed with Du for space computation calculation, 2024-12-06T08:15:09,332 WARN [Thread-1339 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T08:15:09,335 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x941127e15970e9b0 with lease ID 0x77c2ebaaa2a48f71: Processing first storage report for DS-32b4bad3-64b1-4655-adf7-925c6d47c946 from datanode DatanodeRegistration(127.0.0.1:34717, datanodeUuid=24631b3a-5101-4fc5-966c-b6e267d53edc, infoPort=41703, infoSecurePort=0, ipcPort=33101, storageInfo=lv=-57;cid=testClusterID;nsid=1411517897;c=1733472908825) 2024-12-06T08:15:09,335 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x941127e15970e9b0 with lease ID 0x77c2ebaaa2a48f71: from storage DS-32b4bad3-64b1-4655-adf7-925c6d47c946 node DatanodeRegistration(127.0.0.1:34717, datanodeUuid=24631b3a-5101-4fc5-966c-b6e267d53edc, infoPort=41703, infoSecurePort=0, ipcPort=33101, storageInfo=lv=-57;cid=testClusterID;nsid=1411517897;c=1733472908825), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T08:15:09,335 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x941127e15970e9b0 with lease ID 0x77c2ebaaa2a48f71: Processing first storage report for DS-712075eb-a75a-44d1-acb0-683348321bc8 from datanode DatanodeRegistration(127.0.0.1:34717, datanodeUuid=24631b3a-5101-4fc5-966c-b6e267d53edc, infoPort=41703, infoSecurePort=0, ipcPort=33101, storageInfo=lv=-57;cid=testClusterID;nsid=1411517897;c=1733472908825) 2024-12-06T08:15:09,335 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x941127e15970e9b0 with lease ID 0x77c2ebaaa2a48f71: from storage DS-712075eb-a75a-44d1-acb0-683348321bc8 node DatanodeRegistration(127.0.0.1:34717, datanodeUuid=24631b3a-5101-4fc5-966c-b6e267d53edc, infoPort=41703, infoSecurePort=0, ipcPort=33101, storageInfo=lv=-57;cid=testClusterID;nsid=1411517897;c=1733472908825), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:15:09,385 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@671ea749{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/java.io.tmpdir/jetty-localhost-44341-hadoop-hdfs-3_4_1-tests_jar-_-any-16517431174709511813/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:15:09,385 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54ce3781{HTTP/1.1, (http/1.1)}{localhost:44341} 2024-12-06T08:15:09,385 INFO [Time-limited test {}] server.Server(415): Started @223768ms 2024-12-06T08:15:09,387 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-06T08:15:09,478 WARN [Thread-1386 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/cluster_163f29e7-d3e0-9889-5340-a29e52e8b1e7/dfs/data/data3/current/BP-1195693227-172.17.0.2-1733472908825/current, will proceed with Du for space computation calculation, 2024-12-06T08:15:09,478 WARN [Thread-1387 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/cluster_163f29e7-d3e0-9889-5340-a29e52e8b1e7/dfs/data/data4/current/BP-1195693227-172.17.0.2-1733472908825/current, will proceed with Du for space computation calculation, 2024-12-06T08:15:09,501 WARN [Thread-1375 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:15:09,504 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d489376a5f08134 with lease ID 0x77c2ebaaa2a48f72: Processing first storage report for DS-4601db02-14ca-4309-a9bc-437beb522f99 from datanode DatanodeRegistration(127.0.0.1:43811, datanodeUuid=0176b63f-4e79-48a6-9954-5a92bd45854d, infoPort=46317, infoSecurePort=0, ipcPort=34739, storageInfo=lv=-57;cid=testClusterID;nsid=1411517897;c=1733472908825) 2024-12-06T08:15:09,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d489376a5f08134 with lease ID 0x77c2ebaaa2a48f72: from storage DS-4601db02-14ca-4309-a9bc-437beb522f99 node DatanodeRegistration(127.0.0.1:43811, datanodeUuid=0176b63f-4e79-48a6-9954-5a92bd45854d, infoPort=46317, infoSecurePort=0, ipcPort=34739, storageInfo=lv=-57;cid=testClusterID;nsid=1411517897;c=1733472908825), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-06T08:15:09,504 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d489376a5f08134 with lease ID 0x77c2ebaaa2a48f72: Processing first storage report for DS-433cf363-a137-4ef9-a1d9-601c8516b338 from datanode DatanodeRegistration(127.0.0.1:43811, datanodeUuid=0176b63f-4e79-48a6-9954-5a92bd45854d, infoPort=46317, infoSecurePort=0, ipcPort=34739, storageInfo=lv=-57;cid=testClusterID;nsid=1411517897;c=1733472908825) 2024-12-06T08:15:09,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d489376a5f08134 with lease ID 0x77c2ebaaa2a48f72: from storage DS-433cf363-a137-4ef9-a1d9-601c8516b338 node DatanodeRegistration(127.0.0.1:43811, datanodeUuid=0176b63f-4e79-48a6-9954-5a92bd45854d, infoPort=46317, infoSecurePort=0, ipcPort=34739, storageInfo=lv=-57;cid=testClusterID;nsid=1411517897;c=1733472908825), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:15:09,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,515 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,515 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,515 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213 2024-12-06T08:15:09,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,520 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/cluster_163f29e7-d3e0-9889-5340-a29e52e8b1e7/zookeeper_0, clientPort=50705, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/cluster_163f29e7-d3e0-9889-5340-a29e52e8b1e7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/cluster_163f29e7-d3e0-9889-5340-a29e52e8b1e7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T08:15:09,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,524 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=50705 2024-12-06T08:15:09,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:09,525 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:15:09,526 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:15:09,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:15:09,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:15:09,542 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b with version=8 2024-12-06T08:15:09,542 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/hbase-staging 2024-12-06T08:15:09,544 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:15:09,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:15:09,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:15:09,544 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:15:09,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:15:09,544 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:15:09,544 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:15:09,544 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:15:09,545 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45299 2024-12-06T08:15:09,546 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:15:09,548 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:15:09,550 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:45299 connecting to ZooKeeper ensemble=127.0.0.1:50705 2024-12-06T08:15:09,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:452990x0, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:15:09,558 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45299-0x100666531b40000 connected 2024-12-06T08:15:09,580 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:15:09,581 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:15:09,581 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:15:09,584 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45299 2024-12-06T08:15:09,584 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45299 2024-12-06T08:15:09,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45299 2024-12-06T08:15:09,590 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45299 2024-12-06T08:15:09,590 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45299 2024-12-06T08:15:09,592 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b, hbase.cluster.distributed=false 2024-12-06T08:15:09,616 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:15:09,616 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:15:09,616 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:15:09,617 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:15:09,617 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:15:09,617 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:15:09,617 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:15:09,617 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:15:09,618 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42867 2024-12-06T08:15:09,618 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:15:09,620 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:15:09,620 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:15:09,622 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:15:09,625 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42867 connecting to ZooKeeper ensemble=127.0.0.1:50705 2024-12-06T08:15:09,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428670x0, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:15:09,628 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:428670x0, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:15:09,628 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42867-0x100666531b40001 connected 2024-12-06T08:15:09,629 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:15:09,629 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:15:09,632 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42867 2024-12-06T08:15:09,632 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42867 2024-12-06T08:15:09,634 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42867 2024-12-06T08:15:09,637 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42867 2024-12-06T08:15:09,638 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42867 2024-12-06T08:15:09,639 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b6b797fc3981,45299,1733472909543 2024-12-06T08:15:09,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:15:09,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:15:09,641 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b6b797fc3981,45299,1733472909543 2024-12-06T08:15:09,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:15:09,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:15:09,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:09,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:09,644 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:15:09,644 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b6b797fc3981,45299,1733472909543 from backup master directory 2024-12-06T08:15:09,645 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:15:09,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b6b797fc3981,45299,1733472909543 2024-12-06T08:15:09,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:15:09,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:15:09,646 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T08:15:09,646 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b6b797fc3981,45299,1733472909543 2024-12-06T08:15:09,652 DEBUG [M:0;b6b797fc3981:45299 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b6b797fc3981:45299 2024-12-06T08:15:09,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:15:09,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:15:09,660 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/hbase.id with ID: bccf7cd9-c5a0-4630-b391-37bfe286dee8 2024-12-06T08:15:09,670 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:15:09,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:09,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:09,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:15:09,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:15:09,684 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 
2024-12-06T08:15:09,685 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T08:15:09,685 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:15:09,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:15:09,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:15:09,692 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store 2024-12-06T08:15:09,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:15:09,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:15:09,698 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:15:09,698 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:15:09,698 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T08:15:09,698 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:15:09,698 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:15:09,698 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:15:09,698 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:15:09,698 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:15:09,699 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/.initializing 2024-12-06T08:15:09,699 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/WALs/b6b797fc3981,45299,1733472909543 2024-12-06T08:15:09,701 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C45299%2C1733472909543, suffix=, logDir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/WALs/b6b797fc3981,45299,1733472909543, archiveDir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/oldWALs, maxLogs=10 2024-12-06T08:15:09,702 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C45299%2C1733472909543.1733472909702 2024-12-06T08:15:09,706 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/WALs/b6b797fc3981,45299,1733472909543/b6b797fc3981%2C45299%2C1733472909543.1733472909702 2024-12-06T08:15:09,706 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46317:46317),(127.0.0.1/127.0.0.1:41703:41703)] 2024-12-06T08:15:09,706 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:15:09,707 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:15:09,707 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:15:09,707 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:15:09,708 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:15:09,709 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T08:15:09,710 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:09,710 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:15:09,710 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:15:09,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T08:15:09,711 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:09,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:15:09,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:15:09,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T08:15:09,713 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:09,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:15:09,713 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:15:09,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T08:15:09,715 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:09,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:15:09,716 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:15:09,716 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:15:09,718 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T08:15:09,719 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:15:09,721 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:15:09,722 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=752511, jitterRate=-0.0431332141160965}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:15:09,722 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:15:09,723 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T08:15:09,726 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a0b962f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:15:09,727 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-06T08:15:09,727 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T08:15:09,727 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T08:15:09,727 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-06T08:15:09,727 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T08:15:09,728 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-06T08:15:09,728 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T08:15:09,729 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-06T08:15:09,730 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T08:15:09,731 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T08:15:09,732 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T08:15:09,732 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T08:15:09,734 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T08:15:09,734 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T08:15:09,735 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T08:15:09,736 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T08:15:09,737 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T08:15:09,738 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T08:15:09,739 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T08:15:09,741 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T08:15:09,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:15:09,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:15:09,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:09,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-06T08:15:09,743 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b6b797fc3981,45299,1733472909543, sessionid=0x100666531b40000, setting cluster-up flag (Was=false) 2024-12-06T08:15:09,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:09,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:09,751 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T08:15:09,752 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,45299,1733472909543 2024-12-06T08:15:09,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:09,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:09,760 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T08:15:09,761 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,45299,1733472909543 2024-12-06T08:15:09,764 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T08:15:09,764 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T08:15:09,764 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-06T08:15:09,764 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b6b797fc3981,45299,1733472909543 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T08:15:09,765 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:15:09,765 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:15:09,765 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:15:09,765 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:15:09,765 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b6b797fc3981:0, corePoolSize=10, maxPoolSize=10 2024-12-06T08:15:09,765 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:15:09,765 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:15:09,765 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:15:09,766 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733472939766 2024-12-06T08:15:09,766 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T08:15:09,766 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T08:15:09,766 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T08:15:09,766 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T08:15:09,766 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T08:15:09,766 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T08:15:09,766 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:15:09,766 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:09,766 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T08:15:09,767 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T08:15:09,767 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T08:15:09,767 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T08:15:09,767 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T08:15:09,767 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T08:15:09,767 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472909767,5,FailOnTimeoutGroup] 2024-12-06T08:15:09,768 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:09,768 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:15:09,768 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472909767,5,FailOnTimeoutGroup] 2024-12-06T08:15:09,768 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-06T08:15:09,768 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T08:15:09,768 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:09,768 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:09,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:15:09,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:15:09,776 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T08:15:09,776 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b 2024-12-06T08:15:09,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:15:09,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:15:09,783 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:15:09,784 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:15:09,785 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:15:09,785 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:09,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:15:09,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:15:09,787 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:15:09,787 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:09,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:15:09,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:15:09,789 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality 
to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:15:09,789 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:09,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:15:09,790 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740 2024-12-06T08:15:09,791 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740 2024-12-06T08:15:09,792 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:15:09,793 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:15:09,795 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:15:09,796 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705365, jitterRate=-0.1030823290348053}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:15:09,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:15:09,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:15:09,796 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:15:09,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:15:09,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:15:09,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:15:09,796 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T08:15:09,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:15:09,797 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:15:09,797 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T08:15:09,797 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-06T08:15:09,799 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-06T08:15:09,799 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-06T08:15:09,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-06T08:15:09,851 DEBUG [RS:0;b6b797fc3981:42867 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b6b797fc3981:42867
2024-12-06T08:15:09,852 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(1008): ClusterId : bccf7cd9-c5a0-4630-b391-37bfe286dee8
2024-12-06T08:15:09,852 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-06T08:15:09,854 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-06T08:15:09,854 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-06T08:15:09,856 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-06T08:15:09,856 DEBUG [RS:0;b6b797fc3981:42867 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bced13c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-06T08:15:09,857 DEBUG [RS:0;b6b797fc3981:42867 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@514425f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0
2024-12-06T08:15:09,857 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled
2024-12-06T08:15:09,857 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled
2024-12-06T08:15:09,857 DEBUG [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(1090): About to register with Master.
2024-12-06T08:15:09,857 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(3073): reportForDuty to master=b6b797fc3981,45299,1733472909543 with isa=b6b797fc3981/172.17.0.2:42867, startcode=1733472909616 2024-12-06T08:15:09,857 DEBUG [RS:0;b6b797fc3981:42867 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:15:09,859 INFO [RS-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37269, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:15:09,860 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45299 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b6b797fc3981,42867,1733472909616 2024-12-06T08:15:09,860 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45299 {}] master.ServerManager(486): Registering regionserver=b6b797fc3981,42867,1733472909616 2024-12-06T08:15:09,861 DEBUG [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b 2024-12-06T08:15:09,861 DEBUG [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:40157 2024-12-06T08:15:09,861 DEBUG [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T08:15:09,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:15:09,863 DEBUG [RS:0;b6b797fc3981:42867 {}] zookeeper.ZKUtil(111): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b6b797fc3981,42867,1733472909616 2024-12-06T08:15:09,863 WARN [RS:0;b6b797fc3981:42867 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T08:15:09,863 INFO [RS:0;b6b797fc3981:42867 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:15:09,863 DEBUG [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616 2024-12-06T08:15:09,864 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b6b797fc3981,42867,1733472909616] 2024-12-06T08:15:09,866 DEBUG [RS:0;b6b797fc3981:42867 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T08:15:09,866 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:15:09,868 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:15:09,868 INFO [RS:0;b6b797fc3981:42867 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:15:09,868 INFO [RS:0;b6b797fc3981:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:09,871 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T08:15:09,872 INFO [RS:0;b6b797fc3981:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T08:15:09,872 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:15:09,872 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:15:09,872 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:15:09,873 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:15:09,873 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:15:09,873 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:15:09,873 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:15:09,873 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:15:09,873 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:15:09,873 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:15:09,873 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:15:09,873 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:15:09,873 DEBUG [RS:0;b6b797fc3981:42867 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:15:09,874 INFO [RS:0;b6b797fc3981:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:09,874 INFO [RS:0;b6b797fc3981:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:09,874 INFO [RS:0;b6b797fc3981:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:09,874 INFO [RS:0;b6b797fc3981:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:09,874 INFO [RS:0;b6b797fc3981:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42867,1733472909616-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-06T08:15:09,889 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:15:09,890 INFO [RS:0;b6b797fc3981:42867 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42867,1733472909616-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:09,904 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.Replication(204): b6b797fc3981,42867,1733472909616 started 2024-12-06T08:15:09,904 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(1767): Serving as b6b797fc3981,42867,1733472909616, RpcServer on b6b797fc3981/172.17.0.2:42867, sessionid=0x100666531b40001 2024-12-06T08:15:09,905 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:15:09,905 DEBUG [RS:0;b6b797fc3981:42867 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b6b797fc3981,42867,1733472909616 2024-12-06T08:15:09,905 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,42867,1733472909616' 2024-12-06T08:15:09,905 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:15:09,905 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:15:09,906 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:15:09,906 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:15:09,906 DEBUG [RS:0;b6b797fc3981:42867 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b6b797fc3981,42867,1733472909616 2024-12-06T08:15:09,906 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,42867,1733472909616' 2024-12-06T08:15:09,906 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:15:09,906 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:15:09,906 DEBUG [RS:0;b6b797fc3981:42867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:15:09,906 INFO [RS:0;b6b797fc3981:42867 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:15:09,906 INFO [RS:0;b6b797fc3981:42867 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T08:15:09,950 WARN [b6b797fc3981:45299 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-06T08:15:10,009 INFO [RS:0;b6b797fc3981:42867 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C42867%2C1733472909616, suffix=, logDir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616, archiveDir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/oldWALs, maxLogs=32 2024-12-06T08:15:10,010 INFO [RS:0;b6b797fc3981:42867 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C42867%2C1733472909616.1733472910010 2024-12-06T08:15:10,020 INFO [RS:0;b6b797fc3981:42867 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.1733472910010 2024-12-06T08:15:10,020 DEBUG [RS:0;b6b797fc3981:42867 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46317:46317),(127.0.0.1/127.0.0.1:41703:41703)] 2024-12-06T08:15:10,026 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T08:15:10,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,026 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:10,200 DEBUG [b6b797fc3981:45299 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T08:15:10,200 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b6b797fc3981,42867,1733472909616 2024-12-06T08:15:10,202 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,42867,1733472909616, state=OPENING 2024-12-06T08:15:10,203 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T08:15:10,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:10,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:10,206 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b6b797fc3981,42867,1733472909616}] 2024-12-06T08:15:10,206 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:15:10,206 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:15:10,359 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,42867,1733472909616 2024-12-06T08:15:10,359 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T08:15:10,361 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34298, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T08:15:10,364 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T08:15:10,364 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:15:10,366 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C42867%2C1733472909616.meta, suffix=.meta, logDir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616, 
archiveDir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/oldWALs, maxLogs=32 2024-12-06T08:15:10,367 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C42867%2C1733472909616.meta.1733472910367.meta 2024-12-06T08:15:10,372 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.meta.1733472910367.meta 2024-12-06T08:15:10,372 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41703:41703),(127.0.0.1/127.0.0.1:46317:46317)] 2024-12-06T08:15:10,372 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:15:10,372 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T08:15:10,372 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T08:15:10,373 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-06T08:15:10,373 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T08:15:10,373 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:15:10,373 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T08:15:10,373 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T08:15:10,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:15:10,375 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:15:10,375 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:10,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:15:10,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:15:10,376 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:15:10,376 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:10,376 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:15:10,377 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:15:10,377 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:15:10,377 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:10,378 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:15:10,378 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740 2024-12-06T08:15:10,379 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740 2024-12-06T08:15:10,380 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-06T08:15:10,382 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:15:10,382 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=861210, jitterRate=0.09508538246154785}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:15:10,382 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:15:10,383 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733472910358 2024-12-06T08:15:10,385 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T08:15:10,385 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T08:15:10,386 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,42867,1733472909616 2024-12-06T08:15:10,387 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,42867,1733472909616, state=OPEN 2024-12-06T08:15:10,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:15:10,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:15:10,392 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:15:10,392 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:15:10,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T08:15:10,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=b6b797fc3981,42867,1733472909616 in 186 msec 2024-12-06T08:15:10,395 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T08:15:10,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 597 msec 2024-12-06T08:15:10,397 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 633 msec 2024-12-06T08:15:10,398 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers 
to report in: status=status unset, state=RUNNING, startTime=1733472910398, completionTime=-1 2024-12-06T08:15:10,398 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T08:15:10,398 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T08:15:10,398 DEBUG [hconnection-0x4e848c6-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:15:10,400 INFO [RS-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34310, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:15:10,401 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T08:15:10,401 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733472970401 2024-12-06T08:15:10,401 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733473030401 2024-12-06T08:15:10,401 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 2 msec 2024-12-06T08:15:10,406 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,45299,1733472909543-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:10,406 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,45299,1733472909543-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:10,406 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,45299,1733472909543-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:10,406 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b6b797fc3981:45299, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:10,406 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T08:15:10,406 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-06T08:15:10,406 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:15:10,407 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T08:15:10,408 DEBUG [master/b6b797fc3981:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T08:15:10,408 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:15:10,408 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:10,409 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:15:10,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:15:10,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:15:10,419 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 460891ce210878d42ba017d44194dc9c, NAME => 'hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b 2024-12-06T08:15:10,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:15:10,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:15:10,426 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:15:10,426 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 460891ce210878d42ba017d44194dc9c, disabling compactions & flushes 2024-12-06T08:15:10,426 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 2024-12-06T08:15:10,426 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 2024-12-06T08:15:10,426 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. after waiting 0 ms 2024-12-06T08:15:10,426 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 2024-12-06T08:15:10,426 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 2024-12-06T08:15:10,426 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 460891ce210878d42ba017d44194dc9c: 2024-12-06T08:15:10,427 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:15:10,428 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733472910427"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733472910427"}]},"ts":"1733472910427"} 2024-12-06T08:15:10,430 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T08:15:10,431 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:15:10,431 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472910431"}]},"ts":"1733472910431"} 2024-12-06T08:15:10,432 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T08:15:10,436 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=460891ce210878d42ba017d44194dc9c, ASSIGN}] 2024-12-06T08:15:10,437 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=460891ce210878d42ba017d44194dc9c, ASSIGN 2024-12-06T08:15:10,438 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=460891ce210878d42ba017d44194dc9c, ASSIGN; state=OFFLINE, location=b6b797fc3981,42867,1733472909616; forceNewPlan=false, retain=false 2024-12-06T08:15:10,589 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=460891ce210878d42ba017d44194dc9c, regionState=OPENING, regionLocation=b6b797fc3981,42867,1733472909616 2024-12-06T08:15:10,591 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 460891ce210878d42ba017d44194dc9c, server=b6b797fc3981,42867,1733472909616}] 2024-12-06T08:15:10,743 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,42867,1733472909616 2024-12-06T08:15:10,748 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 2024-12-06T08:15:10,748 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 460891ce210878d42ba017d44194dc9c, NAME => 'hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:15:10,748 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 460891ce210878d42ba017d44194dc9c 2024-12-06T08:15:10,748 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:15:10,748 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 460891ce210878d42ba017d44194dc9c 2024-12-06T08:15:10,748 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 460891ce210878d42ba017d44194dc9c 2024-12-06T08:15:10,750 INFO [StoreOpener-460891ce210878d42ba017d44194dc9c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 460891ce210878d42ba017d44194dc9c 2024-12-06T08:15:10,751 INFO [StoreOpener-460891ce210878d42ba017d44194dc9c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 460891ce210878d42ba017d44194dc9c columnFamilyName info 2024-12-06T08:15:10,752 DEBUG [StoreOpener-460891ce210878d42ba017d44194dc9c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:10,752 INFO [StoreOpener-460891ce210878d42ba017d44194dc9c-1 {}] regionserver.HStore(327): Store=460891ce210878d42ba017d44194dc9c/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:15:10,753 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/namespace/460891ce210878d42ba017d44194dc9c 2024-12-06T08:15:10,753 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/namespace/460891ce210878d42ba017d44194dc9c 2024-12-06T08:15:10,755 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 460891ce210878d42ba017d44194dc9c 2024-12-06T08:15:10,757 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/namespace/460891ce210878d42ba017d44194dc9c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:15:10,757 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 460891ce210878d42ba017d44194dc9c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694088, jitterRate=-0.11742205917835236}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:15:10,758 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 460891ce210878d42ba017d44194dc9c: 2024-12-06T08:15:10,759 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c., pid=6, masterSystemTime=1733472910743 2024-12-06T08:15:10,761 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 2024-12-06T08:15:10,761 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 
2024-12-06T08:15:10,762 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=460891ce210878d42ba017d44194dc9c, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,42867,1733472909616 2024-12-06T08:15:10,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T08:15:10,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 460891ce210878d42ba017d44194dc9c, server=b6b797fc3981,42867,1733472909616 in 173 msec 2024-12-06T08:15:10,768 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T08:15:10,768 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=460891ce210878d42ba017d44194dc9c, ASSIGN in 330 msec 2024-12-06T08:15:10,769 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:15:10,769 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472910769"}]},"ts":"1733472910769"} 2024-12-06T08:15:10,771 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T08:15:10,774 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:15:10,775 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 368 msec 2024-12-06T08:15:10,808 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T08:15:10,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:15:10,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:10,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:15:10,815 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T08:15:10,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:15:10,828 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 13 msec 2024-12-06T08:15:10,837 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T08:15:10,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:10,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:15:10,848 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 11 msec 2024-12-06T08:15:10,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T08:15:10,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T08:15:10,864 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.218sec 2024-12-06T08:15:10,865 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T08:15:10,865 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T08:15:10,865 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T08:15:10,865 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T08:15:10,865 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T08:15:10,865 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,45299,1733472909543-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:15:10,865 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,45299,1733472909543-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T08:15:10,867 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T08:15:10,867 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T08:15:10,867 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,45299,1733472909543-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T08:15:10,941 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49ad4474 to 127.0.0.1:50705 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b032560 2024-12-06T08:15:10,945 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@644a11c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:15:10,946 DEBUG [hconnection-0x3bd27bc6-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:15:10,948 INFO [RS-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34316, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:15:10,949 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b6b797fc3981,45299,1733472909543 2024-12-06T08:15:10,950 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:15:10,954 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-06T08:15:10,955 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:15:10,957 INFO [RS-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60450, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:15:10,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T08:15:10,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-06T08:15:10,958 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:15:10,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T08:15:10,961 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:15:10,961 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:10,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 9 2024-12-06T08:15:10,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:15:10,962 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:15:10,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741837_1013 (size=405) 2024-12-06T08:15:10,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741837_1013 (size=405) 2024-12-06T08:15:10,970 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 76025a36497f8831463b043642e42bbe, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b 2024-12-06T08:15:10,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741838_1014 (size=88) 2024-12-06T08:15:10,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added 
to blk_1073741838_1014 (size=88) 2024-12-06T08:15:10,976 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:15:10,977 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1681): Closing 76025a36497f8831463b043642e42bbe, disabling compactions & flushes 2024-12-06T08:15:10,977 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 2024-12-06T08:15:10,977 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 2024-12-06T08:15:10,977 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. after waiting 0 ms 2024-12-06T08:15:10,977 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 2024-12-06T08:15:10,977 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 2024-12-06T08:15:10,977 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 76025a36497f8831463b043642e42bbe: 2024-12-06T08:15:10,978 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:15:10,978 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733472910978"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733472910978"}]},"ts":"1733472910978"} 2024-12-06T08:15:10,980 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-06T08:15:10,980 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:15:10,981 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472910981"}]},"ts":"1733472910981"} 2024-12-06T08:15:10,982 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-06T08:15:10,986 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=76025a36497f8831463b043642e42bbe, ASSIGN}] 2024-12-06T08:15:10,987 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=76025a36497f8831463b043642e42bbe, ASSIGN 2024-12-06T08:15:10,988 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=76025a36497f8831463b043642e42bbe, ASSIGN; state=OFFLINE, location=b6b797fc3981,42867,1733472909616; forceNewPlan=false, retain=false 2024-12-06T08:15:11,138 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=76025a36497f8831463b043642e42bbe, regionState=OPENING, regionLocation=b6b797fc3981,42867,1733472909616 2024-12-06T08:15:11,140 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 76025a36497f8831463b043642e42bbe, server=b6b797fc3981,42867,1733472909616}] 2024-12-06T08:15:11,293 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,42867,1733472909616 2024-12-06T08:15:11,296 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 
2024-12-06T08:15:11,297 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 76025a36497f8831463b043642e42bbe, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:15:11,297 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 76025a36497f8831463b043642e42bbe 2024-12-06T08:15:11,297 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:15:11,297 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 76025a36497f8831463b043642e42bbe 2024-12-06T08:15:11,297 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 76025a36497f8831463b043642e42bbe 2024-12-06T08:15:11,299 INFO [StoreOpener-76025a36497f8831463b043642e42bbe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 76025a36497f8831463b043642e42bbe 2024-12-06T08:15:11,300 INFO [StoreOpener-76025a36497f8831463b043642e42bbe-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76025a36497f8831463b043642e42bbe columnFamilyName info 2024-12-06T08:15:11,300 DEBUG [StoreOpener-76025a36497f8831463b043642e42bbe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:15:11,300 INFO [StoreOpener-76025a36497f8831463b043642e42bbe-1 {}] regionserver.HStore(327): Store=76025a36497f8831463b043642e42bbe/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:15:11,301 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe 2024-12-06T08:15:11,301 DEBUG 
[RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe 2024-12-06T08:15:11,303 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 76025a36497f8831463b043642e42bbe 2024-12-06T08:15:11,306 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:15:11,306 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 76025a36497f8831463b043642e42bbe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732813, jitterRate=-0.06818069517612457}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:15:11,307 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 76025a36497f8831463b043642e42bbe: 2024-12-06T08:15:11,308 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe., pid=11, masterSystemTime=1733472911293 2024-12-06T08:15:11,310 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 2024-12-06T08:15:11,310 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 
2024-12-06T08:15:11,310 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=76025a36497f8831463b043642e42bbe, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,42867,1733472909616 2024-12-06T08:15:11,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-06T08:15:11,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 76025a36497f8831463b043642e42bbe, server=b6b797fc3981,42867,1733472909616 in 172 msec 2024-12-06T08:15:11,316 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T08:15:11,316 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=76025a36497f8831463b043642e42bbe, ASSIGN in 328 msec 2024-12-06T08:15:11,316 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:15:11,317 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472911316"}]},"ts":"1733472911316"} 2024-12-06T08:15:11,318 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-06T08:15:11,321 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:15:11,322 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 363 msec 2024-12-06T08:15:11,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:12,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:13,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:14,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:15,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:15,875 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T08:15:15,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,876 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,896 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,896 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:15:15,907 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-06T08:15:15,908 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-06T08:15:16,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:17,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:18,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:19,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:19,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta after 68045ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor238.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:15:20,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:20,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:15:20,963 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 9 completed 2024-12-06T08:15:20,966 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T08:15:20,966 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 
2024-12-06T08:15:20,973 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush hbase:namespace 2024-12-06T08:15:20,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace 2024-12-06T08:15:20,979 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:15:20,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T08:15:20,980 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:15:20,981 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:15:21,141 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,42867,1733472909616 2024-12-06T08:15:21,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42867 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-06T08:15:21,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 
2024-12-06T08:15:21,143 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 460891ce210878d42ba017d44194dc9c 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T08:15:21,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/namespace/460891ce210878d42ba017d44194dc9c/.tmp/info/8c1290c8e3f14eb687a9b2fdb99d4377 is 45, key is default/info:d/1733472910819/Put/seqid=0 2024-12-06T08:15:21,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741839_1015 (size=5037) 2024-12-06T08:15:21,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741839_1015 (size=5037) 2024-12-06T08:15:21,165 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/namespace/460891ce210878d42ba017d44194dc9c/.tmp/info/8c1290c8e3f14eb687a9b2fdb99d4377 2024-12-06T08:15:21,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/namespace/460891ce210878d42ba017d44194dc9c/.tmp/info/8c1290c8e3f14eb687a9b2fdb99d4377 as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/namespace/460891ce210878d42ba017d44194dc9c/info/8c1290c8e3f14eb687a9b2fdb99d4377 2024-12-06T08:15:21,178 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/namespace/460891ce210878d42ba017d44194dc9c/info/8c1290c8e3f14eb687a9b2fdb99d4377, entries=2, sequenceid=6, filesize=4.9 K 2024-12-06T08:15:21,179 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 460891ce210878d42ba017d44194dc9c in 36ms, sequenceid=6, compaction requested=false 2024-12-06T08:15:21,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 460891ce210878d42ba017d44194dc9c: 2024-12-06T08:15:21,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 
2024-12-06T08:15:21,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-06T08:15:21,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-06T08:15:21,187 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-06T08:15:21,187 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 204 msec 2024-12-06T08:15:21,189 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=hbase:namespace in 214 msec 2024-12-06T08:15:21,274 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T08:15:21,274 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-06T08:15:21,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:22,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:23,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:24,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:25,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:26,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:27,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:28,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:29,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:30,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:30,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-06T08:15:30,981 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: hbase:namespace, procId: 12 completed 2024-12-06T08:15:30,988 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T08:15:30,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T08:15:30,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T08:15:30,990 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:15:30,991 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:15:30,991 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:15:31,143 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,42867,1733472909616 2024-12-06T08:15:31,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42867 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-06T08:15:31,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 
2024-12-06T08:15:31,144 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 76025a36497f8831463b043642e42bbe 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T08:15:31,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/52b80af6e667430cb69725587c70195a is 1080, key is row0001/info:/1733472930984/Put/seqid=0 2024-12-06T08:15:31,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741840_1016 (size=6033) 2024-12-06T08:15:31,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741840_1016 (size=6033) 2024-12-06T08:15:31,167 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/52b80af6e667430cb69725587c70195a 2024-12-06T08:15:31,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/52b80af6e667430cb69725587c70195a as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/52b80af6e667430cb69725587c70195a 2024-12-06T08:15:31,178 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/52b80af6e667430cb69725587c70195a, entries=1, sequenceid=5, filesize=5.9 K 2024-12-06T08:15:31,179 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 76025a36497f8831463b043642e42bbe in 34ms, sequenceid=5, compaction requested=false 2024-12-06T08:15:31,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 76025a36497f8831463b043642e42bbe: 2024-12-06T08:15:31,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 
2024-12-06T08:15:31,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-06T08:15:31,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-06T08:15:31,182 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-06T08:15:31,182 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 189 msec 2024-12-06T08:15:31,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec 2024-12-06T08:15:31,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:32,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:33,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:34,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:35,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:36,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:37,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:38,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:39,515 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:15:39,857 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:15:39,858 INFO [RS-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52696, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:15:39,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:40,712 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1fe245b8d8375ab9b801a570a1b2b5e8, had cached 0 bytes from a total of 23930 2024-12-06T08:15:40,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:40,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-06T08:15:40,991 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 14 completed 2024-12-06T08:15:40,996 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T08:15:40,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T08:15:40,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T08:15:40,999 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:15:40,999 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:15:41,000 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:15:41,151 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,42867,1733472909616 2024-12-06T08:15:41,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42867 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-06T08:15:41,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 
2024-12-06T08:15:41,153 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 76025a36497f8831463b043642e42bbe 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T08:15:41,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/344120dd97ac4f3a8a537c57fa643997 is 1080, key is row0002/info:/1733472940992/Put/seqid=0 2024-12-06T08:15:41,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741841_1017 (size=6033) 2024-12-06T08:15:41,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741841_1017 (size=6033) 2024-12-06T08:15:41,165 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/344120dd97ac4f3a8a537c57fa643997 2024-12-06T08:15:41,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/344120dd97ac4f3a8a537c57fa643997 as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/344120dd97ac4f3a8a537c57fa643997 2024-12-06T08:15:41,177 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/344120dd97ac4f3a8a537c57fa643997, entries=1, sequenceid=9, filesize=5.9 K 2024-12-06T08:15:41,178 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 76025a36497f8831463b043642e42bbe in 26ms, sequenceid=9, compaction requested=false 2024-12-06T08:15:41,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 76025a36497f8831463b043642e42bbe: 2024-12-06T08:15:41,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 
2024-12-06T08:15:41,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-06T08:15:41,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-06T08:15:41,182 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-06T08:15:41,182 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec 2024-12-06T08:15:41,184 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec 2024-12-06T08:15:41,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:42,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:43,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:44,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:45,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:46,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:47,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:48,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:49,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:50,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:50,867 INFO [master/b6b797fc3981:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T08:15:50,867 INFO [master/b6b797fc3981:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-06T08:15:51,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-06T08:15:51,000 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 16 completed 2024-12-06T08:15:51,002 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C42867%2C1733472909616.1733472951002 2024-12-06T08:15:51,010 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.1733472910010 with entries=13, filesize=6.41 KB; new WAL /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.1733472951002 2024-12-06T08:15:51,010 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41703:41703),(127.0.0.1/127.0.0.1:46317:46317)] 2024-12-06T08:15:51,010 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.1733472910010 is not closed yet, will try archiving it next time 2024-12-06T08:15:51,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741833_1009 (size=6574) 2024-12-06T08:15:51,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741833_1009 (size=6574) 2024-12-06T08:15:51,014 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T08:15:51,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T08:15:51,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T08:15:51,016 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:15:51,017 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:15:51,017 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:15:51,169 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,42867,1733472909616 2024-12-06T08:15:51,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42867 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-06T08:15:51,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 2024-12-06T08:15:51,170 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 76025a36497f8831463b043642e42bbe 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T08:15:51,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/32739a40473c4c94a9f5aaa634de72dc is 1080, key is row0003/info:/1733472951001/Put/seqid=0 2024-12-06T08:15:51,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741843_1019 (size=6033) 2024-12-06T08:15:51,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741843_1019 (size=6033) 2024-12-06T08:15:51,180 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/32739a40473c4c94a9f5aaa634de72dc 2024-12-06T08:15:51,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/32739a40473c4c94a9f5aaa634de72dc as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/32739a40473c4c94a9f5aaa634de72dc 2024-12-06T08:15:51,192 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/32739a40473c4c94a9f5aaa634de72dc, entries=1, sequenceid=13, filesize=5.9 K 2024-12-06T08:15:51,193 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 76025a36497f8831463b043642e42bbe in 23ms, sequenceid=13, compaction requested=true 2024-12-06T08:15:51,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 76025a36497f8831463b043642e42bbe: 2024-12-06T08:15:51,193 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 2024-12-06T08:15:51,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-06T08:15:51,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-06T08:15:51,196 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-06T08:15:51,196 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-12-06T08:15:51,197 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-12-06T08:15:51,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:52,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:53,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:54,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:55,748 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 460891ce210878d42ba017d44194dc9c, had cached 0 bytes from a total of 5037 2024-12-06T08:15:55,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:56,297 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 76025a36497f8831463b043642e42bbe, had cached 0 bytes from a total of 18099 2024-12-06T08:15:56,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:57,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:15:58,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:15:59,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:00,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:01,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-06T08:16:01,017 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 18 completed 2024-12-06T08:16:01,018 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:16:01,019 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:16:01,019 DEBUG [Time-limited test {}] regionserver.HStore(1540): 76025a36497f8831463b043642e42bbe/info is initiating minor compaction (all files) 2024-12-06T08:16:01,019 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:16:01,019 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:01,020 INFO [Time-limited test {}] regionserver.HRegion(2351): Starting compaction of 76025a36497f8831463b043642e42bbe/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 2024-12-06T08:16:01,020 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/52b80af6e667430cb69725587c70195a, hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/344120dd97ac4f3a8a537c57fa643997, hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/32739a40473c4c94a9f5aaa634de72dc] into tmpdir=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp, totalSize=17.7 K 2024-12-06T08:16:01,020 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 52b80af6e667430cb69725587c70195a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733472930984 2024-12-06T08:16:01,021 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 344120dd97ac4f3a8a537c57fa643997, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733472940992 2024-12-06T08:16:01,021 DEBUG [Time-limited test {}] compactions.Compactor(224): Compacting 32739a40473c4c94a9f5aaa634de72dc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733472951001 2024-12-06T08:16:01,033 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 76025a36497f8831463b043642e42bbe#info#compaction#29 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:16:01,033 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/c8e5c6042b1f461d85e4dbc3eb0bb1b3 is 1080, key is row0001/info:/1733472930984/Put/seqid=0 2024-12-06T08:16:01,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741844_1020 (size=8296) 2024-12-06T08:16:01,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741844_1020 (size=8296) 2024-12-06T08:16:01,044 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/c8e5c6042b1f461d85e4dbc3eb0bb1b3 as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/c8e5c6042b1f461d85e4dbc3eb0bb1b3 2024-12-06T08:16:01,051 INFO [Time-limited test {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 76025a36497f8831463b043642e42bbe/info of 76025a36497f8831463b043642e42bbe into c8e5c6042b1f461d85e4dbc3eb0bb1b3(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:16:01,051 DEBUG [Time-limited test {}] regionserver.HRegion(2381): Compaction status journal for 76025a36497f8831463b043642e42bbe: 2024-12-06T08:16:01,053 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C42867%2C1733472909616.1733472961053 2024-12-06T08:16:01,061 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.1733472951002 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.1733472961053 2024-12-06T08:16:01,061 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46317:46317),(127.0.0.1/127.0.0.1:41703:41703)] 2024-12-06T08:16:01,061 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.1733472951002 is not closed yet, will try archiving it next time 2024-12-06T08:16:01,061 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.1733472910010 to hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/oldWALs/b6b797fc3981%2C42867%2C1733472909616.1733472910010 2024-12-06T08:16:01,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741842_1018 (size=2520) 2024-12-06T08:16:01,063 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741842_1018 (size=2520) 2024-12-06T08:16:01,065 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T08:16:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T08:16:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-06T08:16:01,067 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-06T08:16:01,068 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-06T08:16:01,068 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-06T08:16:01,220 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,42867,1733472909616 2024-12-06T08:16:01,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42867 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-06T08:16:01,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 
2024-12-06T08:16:01,221 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 76025a36497f8831463b043642e42bbe 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T08:16:01,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/c199e70ffc534d23a230ec41f397ccfb is 1080, key is row0000/info:/1733472961051/Put/seqid=0 2024-12-06T08:16:01,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741846_1022 (size=6033) 2024-12-06T08:16:01,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741846_1022 (size=6033) 2024-12-06T08:16:01,232 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/c199e70ffc534d23a230ec41f397ccfb 2024-12-06T08:16:01,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/c199e70ffc534d23a230ec41f397ccfb as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/c199e70ffc534d23a230ec41f397ccfb 2024-12-06T08:16:01,244 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/c199e70ffc534d23a230ec41f397ccfb, entries=1, sequenceid=18, filesize=5.9 K 2024-12-06T08:16:01,245 INFO [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 76025a36497f8831463b043642e42bbe in 24ms, sequenceid=18, compaction requested=false 2024-12-06T08:16:01,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 76025a36497f8831463b043642e42bbe: 2024-12-06T08:16:01,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 
2024-12-06T08:16:01,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-06T08:16:01,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-06T08:16:01,249 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-06T08:16:01,249 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec 2024-12-06T08:16:01,251 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-12-06T08:16:01,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:02,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:03,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:04,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:05,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:06,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:07,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:08,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:09,515 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:16:09,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:10,691 DEBUG [master/b6b797fc3981:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-06T08:16:10,692 DEBUG [master/b6b797fc3981:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 460891ce210878d42ba017d44194dc9c changed from -1.0 to 0.0, refreshing cache 2024-12-06T08:16:10,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:11,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45299 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-06T08:16:11,069 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling, procId: 20 completed 2024-12-06T08:16:11,071 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C42867%2C1733472909616.1733472971071 2024-12-06T08:16:11,078 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.1733472961053 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.1733472971071 2024-12-06T08:16:11,078 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41703:41703),(127.0.0.1/127.0.0.1:46317:46317)] 2024-12-06T08:16:11,078 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.1733472961053 is not closed yet, will try archiving it next time 2024-12-06T08:16:11,078 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616/b6b797fc3981%2C42867%2C1733472909616.1733472951002 to hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/oldWALs/b6b797fc3981%2C42867%2C1733472909616.1733472951002 2024-12-06T08:16:11,078 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T08:16:11,079 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T08:16:11,079 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49ad4474 to 127.0.0.1:50705 2024-12-06T08:16:11,079 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:16:11,079 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T08:16:11,079 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1849260178, stopped=false 2024-12-06T08:16:11,079 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b6b797fc3981,45299,1733472909543 2024-12-06T08:16:11,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741845_1021 (size=2026) 2024-12-06T08:16:11,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741845_1021 (size=2026) 2024-12-06T08:16:11,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:16:11,081 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T08:16:11,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, 
quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:16:11,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:11,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:11,081 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:16:11,081 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,42867,1733472909616' ***** 2024-12-06T08:16:11,081 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T08:16:11,081 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:16:11,082 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:16:11,082 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T08:16:11,082 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:16:11,082 INFO [RS:0;b6b797fc3981:42867 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:16:11,082 INFO [RS:0;b6b797fc3981:42867 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T08:16:11,082 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(3579): Received CLOSE for 460891ce210878d42ba017d44194dc9c 2024-12-06T08:16:11,082 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(3579): Received CLOSE for 76025a36497f8831463b043642e42bbe 2024-12-06T08:16:11,082 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,42867,1733472909616 2024-12-06T08:16:11,082 DEBUG [RS:0;b6b797fc3981:42867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:16:11,082 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 460891ce210878d42ba017d44194dc9c, disabling compactions & flushes 2024-12-06T08:16:11,082 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:16:11,082 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 2024-12-06T08:16:11,082 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 2024-12-06T08:16:11,082 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 
after waiting 0 ms 2024-12-06T08:16:11,082 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 2024-12-06T08:16:11,084 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:16:11,084 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T08:16:11,084 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T08:16:11,084 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-06T08:16:11,084 DEBUG [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(1603): Online Regions={460891ce210878d42ba017d44194dc9c=hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c., 76025a36497f8831463b043642e42bbe=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe., 1588230740=hbase:meta,,1.1588230740} 2024-12-06T08:16:11,084 DEBUG [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 460891ce210878d42ba017d44194dc9c, 76025a36497f8831463b043642e42bbe 2024-12-06T08:16:11,084 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:16:11,085 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:16:11,085 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:16:11,085 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:16:11,085 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:16:11,085 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=3.05 KB heapSize=5.55 KB 2024-12-06T08:16:11,087 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/namespace/460891ce210878d42ba017d44194dc9c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T08:16:11,087 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 2024-12-06T08:16:11,087 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 460891ce210878d42ba017d44194dc9c: 2024-12-06T08:16:11,087 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733472910406.460891ce210878d42ba017d44194dc9c. 
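[Editor's note] The repeated "Failed invocation ... isFileClosed" warnings earlier in this log come from a lease-recovery retry loop on the old meta WAL: the writer asks the NameNode to recover the lease and then polls whether the file is closed, but every probe fails with "Filesystem closed" because the DFS client behind that path has already been shut down. The sketch below shows the same probe pattern against the public HDFS client API in a much simplified form; the retry interval and URI are illustrative, and this is not the reflection-based code in RecoverLeaseFSUtils.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    // Ask the NameNode to recover the lease on a WAL file, then poll until
    // the file is really closed. If the DFSClient is already closed, both
    // calls throw IOException("Filesystem closed"), as seen in the log.
    public static void recover(Configuration conf, String walPath) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) DistributedFileSystem.get(URI.create("hdfs://localhost:44775"), conf);
        Path p = new Path(walPath);
        boolean recovered = dfs.recoverLease(p);
        while (!recovered && !dfs.isFileClosed(p)) {
            Thread.sleep(1000L);          // retry interval is illustrative
            recovered = dfs.recoverLease(p);
        }
    }
}
```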
2024-12-06T08:16:11,088 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 76025a36497f8831463b043642e42bbe, disabling compactions & flushes 2024-12-06T08:16:11,088 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 2024-12-06T08:16:11,088 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 2024-12-06T08:16:11,088 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. after waiting 0 ms 2024-12-06T08:16:11,088 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 2024-12-06T08:16:11,088 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 76025a36497f8831463b043642e42bbe 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-06T08:16:11,092 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/ffea601189004c35baa83c64be36824a is 1080, key is row0001/info:/1733472971069/Put/seqid=0 2024-12-06T08:16:11,102 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740/.tmp/info/83191a378615495abb4e880166437ffd is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe./info:regioninfo/1733472911310/Put/seqid=0 2024-12-06T08:16:11,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741848_1024 (size=6033) 2024-12-06T08:16:11,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741848_1024 (size=6033) 2024-12-06T08:16:11,104 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/ffea601189004c35baa83c64be36824a 2024-12-06T08:16:11,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741849_1025 (size=8430) 2024-12-06T08:16:11,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43811 is added to blk_1073741849_1025 (size=8430) 2024-12-06T08:16:11,110 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740/.tmp/info/83191a378615495abb4e880166437ffd 2024-12-06T08:16:11,112 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/.tmp/info/ffea601189004c35baa83c64be36824a as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/ffea601189004c35baa83c64be36824a 2024-12-06T08:16:11,118 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/ffea601189004c35baa83c64be36824a, entries=1, sequenceid=22, filesize=5.9 K 2024-12-06T08:16:11,119 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 76025a36497f8831463b043642e42bbe in 30ms, sequenceid=22, compaction requested=true 2024-12-06T08:16:11,120 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/52b80af6e667430cb69725587c70195a, hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/344120dd97ac4f3a8a537c57fa643997, hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/32739a40473c4c94a9f5aaa634de72dc] to archive 2024-12-06T08:16:11,121 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T08:16:11,122 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/52b80af6e667430cb69725587c70195a to hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/52b80af6e667430cb69725587c70195a 2024-12-06T08:16:11,124 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/344120dd97ac4f3a8a537c57fa643997 to hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/344120dd97ac4f3a8a537c57fa643997 2024-12-06T08:16:11,125 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/32739a40473c4c94a9f5aaa634de72dc to hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/info/32739a40473c4c94a9f5aaa634de72dc 2024-12-06T08:16:11,129 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76025a36497f8831463b043642e42bbe/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-06T08:16:11,129 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 2024-12-06T08:16:11,129 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 76025a36497f8831463b043642e42bbe: 2024-12-06T08:16:11,130 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733472910958.76025a36497f8831463b043642e42bbe. 
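[Editor's note] On region close, the already-compacted store files above are not deleted but moved into a parallel archive/ tree by HFileArchiver. The sketch below only illustrates that layout-preserving move with the generic FileSystem API; it is a hedged approximation, not the HFileArchiver implementation, and the path rewriting assumes the data/ -> archive/data/ layout visible in this log.

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveCompactedFileSketch {
    // Move a compacted store file from data/<ns>/<table>/<region>/<family>/
    // to the same relative location under archive/, as HFileArchiver logs above.
    public static void archive(Configuration conf, Path storeFile) throws IOException {
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40157"), conf);
        String relocated = storeFile.toUri().getPath()
            .replaceFirst("/data/", "/archive/data/");   // assumes the layout shown in the log
        Path archived = new Path(relocated);
        fs.mkdirs(archived.getParent());
        if (!fs.rename(storeFile, archived)) {
            throw new IOException("Could not archive " + storeFile);
        }
    }
}
```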
2024-12-06T08:16:11,132 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740/.tmp/table/1881a029d72a46f19a98803af473313d is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733472911316/Put/seqid=0 2024-12-06T08:16:11,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741850_1026 (size=5532) 2024-12-06T08:16:11,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741850_1026 (size=5532) 2024-12-06T08:16:11,140 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=264 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740/.tmp/table/1881a029d72a46f19a98803af473313d 2024-12-06T08:16:11,146 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740/.tmp/info/83191a378615495abb4e880166437ffd as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740/info/83191a378615495abb4e880166437ffd 2024-12-06T08:16:11,151 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740/info/83191a378615495abb4e880166437ffd, entries=20, sequenceid=14, filesize=8.2 K 2024-12-06T08:16:11,152 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740/.tmp/table/1881a029d72a46f19a98803af473313d as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740/table/1881a029d72a46f19a98803af473313d 2024-12-06T08:16:11,157 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740/table/1881a029d72a46f19a98803af473313d, entries=4, sequenceid=14, filesize=5.4 K 2024-12-06T08:16:11,158 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~3.05 KB/3122, heapSize ~5.27 KB/5400, currentSize=0 B/0 for 1588230740 in 73ms, sequenceid=14, compaction requested=false 2024-12-06T08:16:11,162 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1 2024-12-06T08:16:11,162 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 
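[Editor's note] The meta flush above persists the regioninfo and table:state cells for the test table into hbase:meta. As a hedged illustration of what those cells look like from the outside, the sketch below scans hbase:meta with the ordinary client API and prints the row keys; connection settings are placeholders.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanMetaSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
            Scan scan = new Scan().addFamily(Bytes.toBytes("info"));
            try (ResultScanner scanner = meta.getScanner(scan)) {
                for (Result row : scanner) {
                    // Row keys are region names such as
                    // TestLogRolling-testCompactionRecordDoesntBlockRolling,,<ts>.<encoded region>.
                    System.out.println(Bytes.toString(row.getRow()));
                }
            }
        }
    }
}
```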
2024-12-06T08:16:11,162 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T08:16:11,162 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:16:11,163 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T08:16:11,274 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-06T08:16:11,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-06T08:16:11,285 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,42867,1733472909616; all regions closed. 2024-12-06T08:16:11,285 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616 2024-12-06T08:16:11,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741834_1010 (size=4570) 2024-12-06T08:16:11,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741834_1010 (size=4570) 2024-12-06T08:16:11,290 DEBUG [RS:0;b6b797fc3981:42867 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/oldWALs 2024-12-06T08:16:11,290 INFO [RS:0;b6b797fc3981:42867 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b6b797fc3981%2C42867%2C1733472909616.meta:.meta(num 1733472910367) 2024-12-06T08:16:11,290 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/WALs/b6b797fc3981,42867,1733472909616 2024-12-06T08:16:11,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741847_1023 (size=1545) 2024-12-06T08:16:11,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741847_1023 (size=1545) 2024-12-06T08:16:11,294 DEBUG [RS:0;b6b797fc3981:42867 {}] wal.AbstractFSWAL(1071): Moved 2 WAL file(s) to /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/oldWALs 2024-12-06T08:16:11,294 INFO [RS:0;b6b797fc3981:42867 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b6b797fc3981%2C42867%2C1733472909616:(num 1733472971071) 2024-12-06T08:16:11,295 DEBUG [RS:0;b6b797fc3981:42867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:16:11,295 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:16:11,295 INFO [RS:0;b6b797fc3981:42867 {}] hbase.ChoreService(370): Chore service for: regionserver/b6b797fc3981:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-06T08:16:11,295 INFO [regionserver/b6b797fc3981:0.logRoller {}] 
wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-06T08:16:11,296 INFO [RS:0;b6b797fc3981:42867 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42867 2024-12-06T08:16:11,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b6b797fc3981,42867,1733472909616 2024-12-06T08:16:11,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:16:11,299 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b6b797fc3981,42867,1733472909616] 2024-12-06T08:16:11,299 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b6b797fc3981,42867,1733472909616; numProcessing=1 2024-12-06T08:16:11,300 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b6b797fc3981,42867,1733472909616 already deleted, retry=false 2024-12-06T08:16:11,300 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b6b797fc3981,42867,1733472909616 expired; onlineServers=0 2024-12-06T08:16:11,300 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,45299,1733472909543' ***** 2024-12-06T08:16:11,301 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T08:16:11,301 DEBUG [M:0;b6b797fc3981:45299 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b187921, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:16:11,301 INFO [M:0;b6b797fc3981:45299 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,45299,1733472909543 2024-12-06T08:16:11,301 INFO [M:0;b6b797fc3981:45299 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,45299,1733472909543; all regions closed. 2024-12-06T08:16:11,301 DEBUG [M:0;b6b797fc3981:45299 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:16:11,301 DEBUG [M:0;b6b797fc3981:45299 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T08:16:11,301 DEBUG [M:0;b6b797fc3981:45299 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T08:16:11,301 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
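[Editor's note] The master learns that the region server is gone because the server's ephemeral znode under /hbase/rs disappears and RegionServerTracker receives the NodeDeleted event shown above. A hedged sketch of watching such an ephemeral node with the plain ZooKeeper client is below; the quorum string and znode path are copied from the log but would differ on another cluster.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsZNodeWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch deleted = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:50705", 30000, event -> {
            // The default watcher also sees connection-state events; only react to deletion.
            if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                System.out.println("RS znode deleted: " + event.getPath());
                deleted.countDown();
            }
        });
        // exists(path, true) registers the default watcher for one change on this node.
        zk.exists("/hbase/rs/b6b797fc3981,42867,1733472909616", true);
        deleted.await();
        zk.close();
    }
}
```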
2024-12-06T08:16:11,301 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472909767 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472909767,5,FailOnTimeoutGroup] 2024-12-06T08:16:11,301 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472909767 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472909767,5,FailOnTimeoutGroup] 2024-12-06T08:16:11,301 INFO [M:0;b6b797fc3981:45299 {}] hbase.ChoreService(370): Chore service for: master/b6b797fc3981:0 had [] on shutdown 2024-12-06T08:16:11,301 DEBUG [M:0;b6b797fc3981:45299 {}] master.HMaster(1733): Stopping service threads 2024-12-06T08:16:11,301 INFO [M:0;b6b797fc3981:45299 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T08:16:11,302 INFO [M:0;b6b797fc3981:45299 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T08:16:11,302 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T08:16:11,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T08:16:11,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:11,303 DEBUG [M:0;b6b797fc3981:45299 {}] zookeeper.ZKUtil(347): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T08:16:11,303 WARN [M:0;b6b797fc3981:45299 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T08:16:11,303 INFO [M:0;b6b797fc3981:45299 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T08:16:11,303 INFO [M:0;b6b797fc3981:45299 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T08:16:11,303 DEBUG [M:0;b6b797fc3981:45299 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:16:11,303 INFO [M:0;b6b797fc3981:45299 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:16:11,303 DEBUG [M:0;b6b797fc3981:45299 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:16:11,303 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:16:11,303 DEBUG [M:0;b6b797fc3981:45299 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:16:11,303 DEBUG [M:0;b6b797fc3981:45299 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T08:16:11,303 INFO [M:0;b6b797fc3981:45299 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=65.08 KB heapSize=81.70 KB 2024-12-06T08:16:11,319 DEBUG [M:0;b6b797fc3981:45299 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/78f1edcdc812497383500a51c2ebd3ae is 82, key is hbase:meta,,1/info:regioninfo/1733472910386/Put/seqid=0 2024-12-06T08:16:11,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741851_1027 (size=5672) 2024-12-06T08:16:11,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741851_1027 (size=5672) 2024-12-06T08:16:11,325 INFO [M:0;b6b797fc3981:45299 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/78f1edcdc812497383500a51c2ebd3ae 2024-12-06T08:16:11,345 DEBUG [M:0;b6b797fc3981:45299 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5cd21e28c53242ac9baf2234299de212 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733472911322/Put/seqid=0 2024-12-06T08:16:11,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741852_1028 (size=8356) 2024-12-06T08:16:11,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741852_1028 (size=8356) 2024-12-06T08:16:11,350 INFO [M:0;b6b797fc3981:45299 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.48 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5cd21e28c53242ac9baf2234299de212 2024-12-06T08:16:11,355 INFO [M:0;b6b797fc3981:45299 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5cd21e28c53242ac9baf2234299de212 2024-12-06T08:16:11,376 DEBUG [M:0;b6b797fc3981:45299 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dddada7e76bd4c6188230b47889f1269 is 69, key is b6b797fc3981,42867,1733472909616/rs:state/1733472909860/Put/seqid=0 2024-12-06T08:16:11,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741853_1029 (size=5156) 2024-12-06T08:16:11,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741853_1029 (size=5156) 2024-12-06T08:16:11,381 INFO [M:0;b6b797fc3981:45299 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=184 (bloomFilter=true), 
to=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dddada7e76bd4c6188230b47889f1269 2024-12-06T08:16:11,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:16:11,400 INFO [RS:0;b6b797fc3981:42867 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,42867,1733472909616; zookeeper connection closed. 2024-12-06T08:16:11,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42867-0x100666531b40001, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:16:11,400 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@19f50de7 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@19f50de7 2024-12-06T08:16:11,400 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T08:16:11,402 DEBUG [M:0;b6b797fc3981:45299 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8be01e6ff049403d988f5871a4f61121 is 52, key is load_balancer_on/state:d/1733472910951/Put/seqid=0 2024-12-06T08:16:11,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741854_1030 (size=5056) 2024-12-06T08:16:11,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741854_1030 (size=5056) 2024-12-06T08:16:11,407 INFO [M:0;b6b797fc3981:45299 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8be01e6ff049403d988f5871a4f61121 2024-12-06T08:16:11,412 DEBUG [M:0;b6b797fc3981:45299 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/78f1edcdc812497383500a51c2ebd3ae as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/78f1edcdc812497383500a51c2ebd3ae 2024-12-06T08:16:11,417 INFO [M:0;b6b797fc3981:45299 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/78f1edcdc812497383500a51c2ebd3ae, entries=8, sequenceid=184, filesize=5.5 K 2024-12-06T08:16:11,418 DEBUG [M:0;b6b797fc3981:45299 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5cd21e28c53242ac9baf2234299de212 as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5cd21e28c53242ac9baf2234299de212 
2024-12-06T08:16:11,423 INFO [M:0;b6b797fc3981:45299 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5cd21e28c53242ac9baf2234299de212 2024-12-06T08:16:11,423 INFO [M:0;b6b797fc3981:45299 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5cd21e28c53242ac9baf2234299de212, entries=21, sequenceid=184, filesize=8.2 K 2024-12-06T08:16:11,424 DEBUG [M:0;b6b797fc3981:45299 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dddada7e76bd4c6188230b47889f1269 as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dddada7e76bd4c6188230b47889f1269 2024-12-06T08:16:11,428 INFO [M:0;b6b797fc3981:45299 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dddada7e76bd4c6188230b47889f1269, entries=1, sequenceid=184, filesize=5.0 K 2024-12-06T08:16:11,429 DEBUG [M:0;b6b797fc3981:45299 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8be01e6ff049403d988f5871a4f61121 as hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8be01e6ff049403d988f5871a4f61121 2024-12-06T08:16:11,433 INFO [M:0;b6b797fc3981:45299 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40157/user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8be01e6ff049403d988f5871a4f61121, entries=1, sequenceid=184, filesize=4.9 K 2024-12-06T08:16:11,434 INFO [M:0;b6b797fc3981:45299 {}] regionserver.HRegion(3040): Finished flush of dataSize ~65.08 KB/66640, heapSize ~81.64 KB/83600, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=184, compaction requested=false 2024-12-06T08:16:11,436 INFO [M:0;b6b797fc3981:45299 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:16:11,436 DEBUG [M:0;b6b797fc3981:45299 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:16:11,436 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/30dec068-089c-d148-1289-364c6a61792b/MasterData/WALs/b6b797fc3981,45299,1733472909543 2024-12-06T08:16:11,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34717 is added to blk_1073741830_1006 (size=79161) 2024-12-06T08:16:11,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43811 is added to blk_1073741830_1006 (size=79161) 2024-12-06T08:16:11,438 INFO [M:0;b6b797fc3981:45299 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-06T08:16:11,438 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T08:16:11,438 INFO [M:0;b6b797fc3981:45299 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45299 2024-12-06T08:16:11,440 DEBUG [M:0;b6b797fc3981:45299 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b6b797fc3981,45299,1733472909543 already deleted, retry=false 2024-12-06T08:16:11,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:16:11,542 INFO [M:0;b6b797fc3981:45299 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,45299,1733472909543; zookeeper connection closed. 2024-12-06T08:16:11,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45299-0x100666531b40000, quorum=127.0.0.1:50705, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:16:11,544 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@671ea749{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:16:11,545 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54ce3781{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:16:11,545 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:16:11,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58ac6791{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:16:11,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b944f85{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/hadoop.log.dir/,STOPPED} 2024-12-06T08:16:11,547 WARN [BP-1195693227-172.17.0.2-1733472908825 heartbeating to localhost/127.0.0.1:40157 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:16:11,547 WARN [BP-1195693227-172.17.0.2-1733472908825 heartbeating to localhost/127.0.0.1:40157 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1195693227-172.17.0.2-1733472908825 (Datanode Uuid 0176b63f-4e79-48a6-9954-5a92bd45854d) service to localhost/127.0.0.1:40157 2024-12-06T08:16:11,547 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:16:11,547 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:16:11,547 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/cluster_163f29e7-d3e0-9889-5340-a29e52e8b1e7/dfs/data/data3/current/BP-1195693227-172.17.0.2-1733472908825 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:16:11,547 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/cluster_163f29e7-d3e0-9889-5340-a29e52e8b1e7/dfs/data/data4/current/BP-1195693227-172.17.0.2-1733472908825 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:16:11,548 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:16:11,550 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@176cd05e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:16:11,550 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@12a372f6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:16:11,550 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:16:11,550 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fd1989a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:16:11,550 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54f4c6b6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/hadoop.log.dir/,STOPPED} 2024-12-06T08:16:11,552 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:16:11,552 WARN [BP-1195693227-172.17.0.2-1733472908825 heartbeating to localhost/127.0.0.1:40157 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:16:11,552 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:16:11,552 WARN [BP-1195693227-172.17.0.2-1733472908825 heartbeating to localhost/127.0.0.1:40157 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1195693227-172.17.0.2-1733472908825 (Datanode Uuid 24631b3a-5101-4fc5-966c-b6e267d53edc) service to localhost/127.0.0.1:40157 2024-12-06T08:16:11,552 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/cluster_163f29e7-d3e0-9889-5340-a29e52e8b1e7/dfs/data/data1/current/BP-1195693227-172.17.0.2-1733472908825 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:16:11,552 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/cluster_163f29e7-d3e0-9889-5340-a29e52e8b1e7/dfs/data/data2/current/BP-1195693227-172.17.0.2-1733472908825 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:16:11,553 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:16:11,559 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5657bbb8{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:16:11,559 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@649a505c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:16:11,559 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:16:11,560 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8d103eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:16:11,560 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11f3953b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/hadoop.log.dir/,STOPPED} 2024-12-06T08:16:11,566 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-06T08:16:11,592 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-06T08:16:11,599 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=110 (was 100) - Thread LEAK? -, OpenFileDescriptor=465 (was 444) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=110 (was 112), ProcessCount=11 (was 11), AvailableMemoryMB=7867 (was 8024) 2024-12-06T08:16:11,606 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=111, OpenFileDescriptor=465, MaxFileDescriptor=1048576, SystemLoadAverage=110, ProcessCount=11, AvailableMemoryMB=7867 2024-12-06T08:16:11,606 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T08:16:11,606 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/hadoop.log.dir so I do NOT create it in target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19 2024-12-06T08:16:11,606 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/0e5ee045-42a3-a3ac-7c50-31dfe97a8213/hadoop.tmp.dir so I do NOT create it in target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19 2024-12-06T08:16:11,606 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/cluster_5a143fe0-2dac-c275-e2ac-ebb977966522, deleteOnExit=true 2024-12-06T08:16:11,606 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T08:16:11,607 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/test.cache.data in system properties and HBase conf 2024-12-06T08:16:11,607 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T08:16:11,607 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/hadoop.log.dir in system properties and HBase conf 2024-12-06T08:16:11,607 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T08:16:11,607 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T08:16:11,607 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T08:16:11,607 DEBUG [Time-limited test {}] fs.HFileSystem(310): The 
file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-06T08:16:11,607 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:16:11,607 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:16:11,608 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T08:16:11,608 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:16:11,608 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T08:16:11,608 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T08:16:11,608 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:16:11,608 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:16:11,608 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T08:16:11,609 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/nfs.dump.dir in system properties and HBase conf 2024-12-06T08:16:11,609 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/java.io.tmpdir in system properties and HBase conf 2024-12-06T08:16:11,609 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:16:11,609 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T08:16:11,609 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T08:16:11,628 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T08:16:11,695 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:16:11,699 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:16:11,700 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:16:11,700 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:16:11,701 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:16:11,701 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:16:11,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8418220{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:16:11,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7cef3938{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:16:11,815 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f3b8626{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/java.io.tmpdir/jetty-localhost-40129-hadoop-hdfs-3_4_1-tests_jar-_-any-8302244222578403427/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:16:11,816 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21b074b4{HTTP/1.1, (http/1.1)}{localhost:40129} 2024-12-06T08:16:11,816 INFO [Time-limited test {}] server.Server(415): Started @286199ms 2024-12-06T08:16:11,829 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T08:16:11,877 INFO [regionserver/b6b797fc3981:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:16:11,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:11,885 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:16:11,888 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:16:11,888 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:16:11,888 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:16:11,888 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:16:11,889 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34331c55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:16:11,889 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e98bd1c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:16:12,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@325f9d5d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/java.io.tmpdir/jetty-localhost-37027-hadoop-hdfs-3_4_1-tests_jar-_-any-17445788255033644806/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:16:12,004 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1df1886a{HTTP/1.1, (http/1.1)}{localhost:37027} 2024-12-06T08:16:12,004 INFO [Time-limited test {}] server.Server(415): Started @286387ms 2024-12-06T08:16:12,006 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:16:12,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:16:12,036 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:16:12,037 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:16:12,037 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:16:12,037 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:16:12,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@339b423a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:16:12,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@682fde91{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:16:12,100 WARN [Thread-1687 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/cluster_5a143fe0-2dac-c275-e2ac-ebb977966522/dfs/data/data2/current/BP-294598891-172.17.0.2-1733472971637/current, will proceed with Du for space computation calculation, 2024-12-06T08:16:12,100 WARN [Thread-1686 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/cluster_5a143fe0-2dac-c275-e2ac-ebb977966522/dfs/data/data1/current/BP-294598891-172.17.0.2-1733472971637/current, will proceed with Du for space computation calculation, 2024-12-06T08:16:12,123 WARN [Thread-1665 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T08:16:12,126 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x80dd7049185a73a2 with lease ID 0x2d8d86a3d181a7a6: Processing first storage report for DS-02ea1611-e723-4553-a45b-1d00d87e8a11 from datanode DatanodeRegistration(127.0.0.1:44155, datanodeUuid=68e91242-1fe4-4e4e-86ef-b8bee2f0ebe0, infoPort=41541, infoSecurePort=0, ipcPort=34023, storageInfo=lv=-57;cid=testClusterID;nsid=517610989;c=1733472971637) 2024-12-06T08:16:12,126 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x80dd7049185a73a2 with lease ID 0x2d8d86a3d181a7a6: from storage DS-02ea1611-e723-4553-a45b-1d00d87e8a11 node DatanodeRegistration(127.0.0.1:44155, datanodeUuid=68e91242-1fe4-4e4e-86ef-b8bee2f0ebe0, infoPort=41541, infoSecurePort=0, ipcPort=34023, storageInfo=lv=-57;cid=testClusterID;nsid=517610989;c=1733472971637), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:16:12,126 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x80dd7049185a73a2 with lease ID 0x2d8d86a3d181a7a6: Processing first storage report for DS-8344b34a-6fff-47e0-8eda-ae8dfe498a04 from datanode DatanodeRegistration(127.0.0.1:44155, datanodeUuid=68e91242-1fe4-4e4e-86ef-b8bee2f0ebe0, infoPort=41541, infoSecurePort=0, ipcPort=34023, storageInfo=lv=-57;cid=testClusterID;nsid=517610989;c=1733472971637) 2024-12-06T08:16:12,126 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x80dd7049185a73a2 with lease ID 0x2d8d86a3d181a7a6: from storage DS-8344b34a-6fff-47e0-8eda-ae8dfe498a04 node DatanodeRegistration(127.0.0.1:44155, datanodeUuid=68e91242-1fe4-4e4e-86ef-b8bee2f0ebe0, infoPort=41541, infoSecurePort=0, ipcPort=34023, storageInfo=lv=-57;cid=testClusterID;nsid=517610989;c=1733472971637), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:16:12,154 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b425372{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/java.io.tmpdir/jetty-localhost-35801-hadoop-hdfs-3_4_1-tests_jar-_-any-13686103246907069018/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:16:12,155 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2c86dd56{HTTP/1.1, (http/1.1)}{localhost:35801} 2024-12-06T08:16:12,155 INFO [Time-limited test {}] server.Server(415): Started @286538ms 2024-12-06T08:16:12,156 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-06T08:16:12,249 WARN [Thread-1713 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/cluster_5a143fe0-2dac-c275-e2ac-ebb977966522/dfs/data/data4/current/BP-294598891-172.17.0.2-1733472971637/current, will proceed with Du for space computation calculation, 2024-12-06T08:16:12,249 WARN [Thread-1712 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/cluster_5a143fe0-2dac-c275-e2ac-ebb977966522/dfs/data/data3/current/BP-294598891-172.17.0.2-1733472971637/current, will proceed with Du for space computation calculation, 2024-12-06T08:16:12,277 WARN [Thread-1701 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:16:12,279 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f75d1ad546620e3 with lease ID 0x2d8d86a3d181a7a7: Processing first storage report for DS-99c884da-5dfc-4959-9003-94b862df1fc0 from datanode DatanodeRegistration(127.0.0.1:33427, datanodeUuid=331a1894-778f-4031-ba25-24c4424b1549, infoPort=35783, infoSecurePort=0, ipcPort=42413, storageInfo=lv=-57;cid=testClusterID;nsid=517610989;c=1733472971637) 2024-12-06T08:16:12,279 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f75d1ad546620e3 with lease ID 0x2d8d86a3d181a7a7: from storage DS-99c884da-5dfc-4959-9003-94b862df1fc0 node DatanodeRegistration(127.0.0.1:33427, datanodeUuid=331a1894-778f-4031-ba25-24c4424b1549, infoPort=35783, infoSecurePort=0, ipcPort=42413, storageInfo=lv=-57;cid=testClusterID;nsid=517610989;c=1733472971637), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:16:12,279 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f75d1ad546620e3 with lease ID 0x2d8d86a3d181a7a7: Processing first storage report for DS-276a6db6-29ba-4107-b84d-fdcdc5207432 from datanode DatanodeRegistration(127.0.0.1:33427, datanodeUuid=331a1894-778f-4031-ba25-24c4424b1549, infoPort=35783, infoSecurePort=0, ipcPort=42413, storageInfo=lv=-57;cid=testClusterID;nsid=517610989;c=1733472971637) 2024-12-06T08:16:12,279 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f75d1ad546620e3 with lease ID 0x2d8d86a3d181a7a7: from storage DS-276a6db6-29ba-4107-b84d-fdcdc5207432 node DatanodeRegistration(127.0.0.1:33427, datanodeUuid=331a1894-778f-4031-ba25-24c4424b1549, infoPort=35783, infoSecurePort=0, ipcPort=42413, storageInfo=lv=-57;cid=testClusterID;nsid=517610989;c=1733472971637), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:16:12,379 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19 2024-12-06T08:16:12,381 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/cluster_5a143fe0-2dac-c275-e2ac-ebb977966522/zookeeper_0, clientPort=62431, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/cluster_5a143fe0-2dac-c275-e2ac-ebb977966522/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/cluster_5a143fe0-2dac-c275-e2ac-ebb977966522/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T08:16:12,382 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=62431 2024-12-06T08:16:12,383 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:16:12,384 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:16:12,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:16:12,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:16:12,394 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418 with version=8 2024-12-06T08:16:12,394 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/hbase-staging 2024-12-06T08:16:12,396 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:16:12,397 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:16:12,397 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:16:12,397 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:16:12,397 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:16:12,397 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:16:12,397 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:16:12,397 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:16:12,398 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42321 2024-12-06T08:16:12,398 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:16:12,399 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:16:12,401 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:42321 connecting to ZooKeeper ensemble=127.0.0.1:62431 2024-12-06T08:16:12,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:423210x0, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:16:12,409 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42321-0x100666627430000 connected 2024-12-06T08:16:12,421 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:16:12,421 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:16:12,422 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:16:12,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42321 2024-12-06T08:16:12,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42321 2024-12-06T08:16:12,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42321 2024-12-06T08:16:12,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42321 2024-12-06T08:16:12,424 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42321 2024-12-06T08:16:12,424 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418, hbase.cluster.distributed=false 2024-12-06T08:16:12,440 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:16:12,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:16:12,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:16:12,440 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:16:12,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:16:12,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:16:12,440 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:16:12,441 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:16:12,441 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41981 2024-12-06T08:16:12,442 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:16:12,442 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:16:12,443 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:16:12,445 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:16:12,447 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41981 connecting to ZooKeeper ensemble=127.0.0.1:62431 2024-12-06T08:16:12,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:419810x0, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:16:12,450 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:419810x0, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:16:12,450 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41981-0x100666627430001 connected 2024-12-06T08:16:12,451 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:16:12,452 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:16:12,455 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41981 2024-12-06T08:16:12,455 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41981 2024-12-06T08:16:12,455 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41981 2024-12-06T08:16:12,458 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41981 2024-12-06T08:16:12,458 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41981 2024-12-06T08:16:12,459 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b6b797fc3981,42321,1733472972396 2024-12-06T08:16:12,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:16:12,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:16:12,461 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b6b797fc3981,42321,1733472972396 2024-12-06T08:16:12,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:16:12,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:16:12,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:12,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:12,464 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:16:12,464 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b6b797fc3981,42321,1733472972396 from backup master directory 2024-12-06T08:16:12,464 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:16:12,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b6b797fc3981,42321,1733472972396 2024-12-06T08:16:12,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:16:12,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:16:12,465 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:16:12,465 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b6b797fc3981,42321,1733472972396 2024-12-06T08:16:12,472 DEBUG [M:0;b6b797fc3981:42321 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b6b797fc3981:42321 2024-12-06T08:16:12,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:16:12,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:16:12,483 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/hbase.id with ID: 4139a412-2158-4eba-b6d5-8e5f9ef9cbec 2024-12-06T08:16:12,493 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:16:12,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:12,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:12,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:16:12,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:16:12,507 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
{NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:16:12,508 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T08:16:12,510 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:16:12,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:16:12,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:16:12,523 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store 2024-12-06T08:16:12,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:16:12,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:16:12,532 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:16:12,532 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:16:12,532 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:16:12,532 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:16:12,532 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:16:12,532 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:16:12,532 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:16:12,532 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:16:12,533 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/.initializing 2024-12-06T08:16:12,533 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/WALs/b6b797fc3981,42321,1733472972396 2024-12-06T08:16:12,536 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C42321%2C1733472972396, suffix=, logDir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/WALs/b6b797fc3981,42321,1733472972396, archiveDir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/oldWALs, maxLogs=10 2024-12-06T08:16:12,537 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C42321%2C1733472972396.1733472972537 2024-12-06T08:16:12,544 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/WALs/b6b797fc3981,42321,1733472972396/b6b797fc3981%2C42321%2C1733472972396.1733472972537 2024-12-06T08:16:12,544 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41541:41541),(127.0.0.1/127.0.0.1:35783:35783)] 2024-12-06T08:16:12,544 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:16:12,544 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:16:12,544 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:16:12,544 DEBUG [master/b6b797fc3981:0:becomeActiveMaster 
{}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:16:12,548 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:16:12,549 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T08:16:12,550 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:12,550 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:16:12,550 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:16:12,551 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T08:16:12,552 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:12,552 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:16:12,552 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:16:12,553 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T08:16:12,553 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:12,553 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:16:12,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:16:12,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T08:16:12,555 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:12,555 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:16:12,556 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:16:12,556 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:16:12,558 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T08:16:12,559 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:16:12,560 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:16:12,561 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832610, jitterRate=0.05871959030628204}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:16:12,561 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:16:12,562 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T08:16:12,565 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73365b1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:16:12,565 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-06T08:16:12,566 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T08:16:12,566 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T08:16:12,566 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
2024-12-06T08:16:12,566 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T08:16:12,566 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-06T08:16:12,566 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T08:16:12,571 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T08:16:12,571 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T08:16:12,573 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T08:16:12,573 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T08:16:12,574 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T08:16:12,575 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T08:16:12,575 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T08:16:12,576 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T08:16:12,577 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T08:16:12,577 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T08:16:12,578 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T08:16:12,580 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T08:16:12,581 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T08:16:12,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/running 2024-12-06T08:16:12,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:16:12,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:12,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:12,583 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b6b797fc3981,42321,1733472972396, sessionid=0x100666627430000, setting cluster-up flag (Was=false) 2024-12-06T08:16:12,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:12,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:12,589 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T08:16:12,590 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,42321,1733472972396 2024-12-06T08:16:12,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:12,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:12,597 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T08:16:12,598 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,42321,1733472972396 2024-12-06T08:16:12,600 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T08:16:12,600 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T08:16:12,600 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, 
TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T08:16:12,601 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b6b797fc3981,42321,1733472972396 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T08:16:12,601 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:16:12,601 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:16:12,601 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:16:12,601 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:16:12,601 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b6b797fc3981:0, corePoolSize=10, maxPoolSize=10 2024-12-06T08:16:12,601 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:16:12,601 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:16:12,601 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:16:12,607 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733473002607 2024-12-06T08:16:12,607 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T08:16:12,607 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T08:16:12,607 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T08:16:12,607 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T08:16:12,607 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T08:16:12,607 
DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:16:12,607 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T08:16:12,607 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T08:16:12,608 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:12,608 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T08:16:12,608 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T08:16:12,608 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T08:16:12,608 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T08:16:12,608 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T08:16:12,608 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:12,609 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472972608,5,FailOnTimeoutGroup] 2024-12-06T08:16:12,608 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:16:12,609 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472972609,5,FailOnTimeoutGroup] 2024-12-06T08:16:12,609 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] 
hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:12,609 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T08:16:12,609 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:12,609 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:12,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:16:12,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:16:12,615 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T08:16:12,616 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418 2024-12-06T08:16:12,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:16:12,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:16:12,625 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:16:12,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:16:12,628 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:16:12,628 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:12,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:16:12,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:16:12,629 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:16:12,629 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:12,630 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:16:12,630 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:16:12,631 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:16:12,631 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:12,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:16:12,632 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740 2024-12-06T08:16:12,632 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740 2024-12-06T08:16:12,633 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:16:12,634 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:16:12,636 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:16:12,636 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=786387, jitterRate=-5.8397650718688965E-5}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:16:12,636 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:16:12,636 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:16:12,637 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:16:12,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:16:12,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:16:12,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:16:12,637 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T08:16:12,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:16:12,638 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 
2024-12-06T08:16:12,638 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T08:16:12,638 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T08:16:12,639 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T08:16:12,640 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T08:16:12,670 DEBUG [RS:0;b6b797fc3981:41981 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b6b797fc3981:41981 2024-12-06T08:16:12,671 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(1008): ClusterId : 4139a412-2158-4eba-b6d5-8e5f9ef9cbec 2024-12-06T08:16:12,671 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:16:12,673 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:16:12,673 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:16:12,675 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:16:12,675 DEBUG [RS:0;b6b797fc3981:41981 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ca44735, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:16:12,675 DEBUG [RS:0;b6b797fc3981:41981 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@338ef47a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:16:12,675 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T08:16:12,675 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T08:16:12,675 DEBUG [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-06T08:16:12,676 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(3073): reportForDuty to master=b6b797fc3981,42321,1733472972396 with isa=b6b797fc3981/172.17.0.2:41981, startcode=1733472972440 2024-12-06T08:16:12,676 DEBUG [RS:0;b6b797fc3981:41981 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:16:12,678 INFO [RS-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37053, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:16:12,678 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42321 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b6b797fc3981,41981,1733472972440 2024-12-06T08:16:12,678 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42321 {}] master.ServerManager(486): Registering regionserver=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:12,680 DEBUG [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418 2024-12-06T08:16:12,680 DEBUG [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:35589 2024-12-06T08:16:12,680 DEBUG [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T08:16:12,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:16:12,681 DEBUG [RS:0;b6b797fc3981:41981 {}] zookeeper.ZKUtil(111): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b6b797fc3981,41981,1733472972440 2024-12-06T08:16:12,681 WARN [RS:0;b6b797fc3981:41981 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T08:16:12,682 INFO [RS:0;b6b797fc3981:41981 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:16:12,682 DEBUG [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440 2024-12-06T08:16:12,682 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b6b797fc3981,41981,1733472972440] 2024-12-06T08:16:12,684 DEBUG [RS:0;b6b797fc3981:41981 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T08:16:12,685 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:16:12,686 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:16:12,686 INFO [RS:0;b6b797fc3981:41981 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:16:12,687 INFO [RS:0;b6b797fc3981:41981 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:12,687 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T08:16:12,687 INFO [RS:0;b6b797fc3981:41981 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T08:16:12,687 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:16:12,688 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:16:12,688 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:16:12,688 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:16:12,688 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:16:12,688 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:16:12,688 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:16:12,688 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:16:12,688 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:16:12,688 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:16:12,688 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:16:12,688 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:16:12,688 DEBUG [RS:0;b6b797fc3981:41981 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:16:12,688 INFO [RS:0;b6b797fc3981:41981 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:12,688 INFO [RS:0;b6b797fc3981:41981 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:12,688 INFO [RS:0;b6b797fc3981:41981 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:12,688 INFO [RS:0;b6b797fc3981:41981 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:12,688 INFO [RS:0;b6b797fc3981:41981 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,41981,1733472972440-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-06T08:16:12,703 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:16:12,703 INFO [RS:0;b6b797fc3981:41981 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,41981,1733472972440-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:12,717 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.Replication(204): b6b797fc3981,41981,1733472972440 started 2024-12-06T08:16:12,717 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(1767): Serving as b6b797fc3981,41981,1733472972440, RpcServer on b6b797fc3981/172.17.0.2:41981, sessionid=0x100666627430001 2024-12-06T08:16:12,717 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:16:12,717 DEBUG [RS:0;b6b797fc3981:41981 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b6b797fc3981,41981,1733472972440 2024-12-06T08:16:12,718 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,41981,1733472972440' 2024-12-06T08:16:12,718 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:16:12,718 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:16:12,718 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:16:12,718 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:16:12,718 DEBUG [RS:0;b6b797fc3981:41981 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b6b797fc3981,41981,1733472972440 2024-12-06T08:16:12,718 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,41981,1733472972440' 2024-12-06T08:16:12,718 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:16:12,719 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:16:12,719 DEBUG [RS:0;b6b797fc3981:41981 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:16:12,719 INFO [RS:0;b6b797fc3981:41981 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:16:12,719 INFO [RS:0;b6b797fc3981:41981 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T08:16:12,790 WARN [b6b797fc3981:42321 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-06T08:16:12,821 INFO [RS:0;b6b797fc3981:41981 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C41981%2C1733472972440, suffix=, logDir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440, archiveDir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/oldWALs, maxLogs=32
2024-12-06T08:16:12,821 INFO [RS:0;b6b797fc3981:41981 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C41981%2C1733472972440.1733472972821
2024-12-06T08:16:12,827 INFO [RS:0;b6b797fc3981:41981 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440/b6b797fc3981%2C41981%2C1733472972440.1733472972821
2024-12-06T08:16:12,827 DEBUG [RS:0;b6b797fc3981:41981 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41541:41541),(127.0.0.1/127.0.0.1:35783:35783)]
2024-12-06T08:16:12,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-06T08:16:13,040 DEBUG [b6b797fc3981:42321 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T08:16:13,041 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:13,042 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,41981,1733472972440, state=OPENING 2024-12-06T08:16:13,043 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T08:16:13,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:13,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:13,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b6b797fc3981,41981,1733472972440}] 2024-12-06T08:16:13,045 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:16:13,045 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:16:13,197 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,41981,1733472972440 2024-12-06T08:16:13,198 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T08:16:13,199 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53674, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T08:16:13,203 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T08:16:13,203 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:16:13,204 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C41981%2C1733472972440.meta, suffix=.meta, logDir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440, archiveDir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/oldWALs, maxLogs=32 2024-12-06T08:16:13,205 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C41981%2C1733472972440.meta.1733472973205.meta 2024-12-06T08:16:13,212 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL 
/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440/b6b797fc3981%2C41981%2C1733472972440.meta.1733472973205.meta 2024-12-06T08:16:13,212 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41541:41541),(127.0.0.1/127.0.0.1:35783:35783)] 2024-12-06T08:16:13,212 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:16:13,213 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T08:16:13,213 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T08:16:13,213 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-06T08:16:13,213 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T08:16:13,213 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:16:13,213 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T08:16:13,213 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T08:16:13,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:16:13,215 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:16:13,215 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:16:13,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:16:13,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:16:13,216 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:16:13,216 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:13,217 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:16:13,217 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:16:13,218 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:16:13,218 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:13,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:16:13,219 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) 
under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740 2024-12-06T08:16:13,220 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740 2024-12-06T08:16:13,221 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:16:13,222 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:16:13,223 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805856, jitterRate=0.024699047207832336}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:16:13,223 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:16:13,224 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733472973197 2024-12-06T08:16:13,225 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T08:16:13,225 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T08:16:13,226 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:13,226 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,41981,1733472972440, state=OPEN 2024-12-06T08:16:13,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:16:13,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:16:13,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:16:13,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:16:13,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T08:16:13,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, 
state=SUCCESS; OpenRegionProcedure 1588230740, server=b6b797fc3981,41981,1733472972440 in 185 msec 2024-12-06T08:16:13,234 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T08:16:13,234 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 594 msec 2024-12-06T08:16:13,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 635 msec 2024-12-06T08:16:13,235 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733472973235, completionTime=-1 2024-12-06T08:16:13,235 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T08:16:13,235 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T08:16:13,236 DEBUG [hconnection-0x24b60a2a-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:16:13,237 INFO [RS-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53686, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:16:13,238 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T08:16:13,238 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733473033238 2024-12-06T08:16:13,238 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733473093238 2024-12-06T08:16:13,238 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 2 msec 2024-12-06T08:16:13,244 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42321,1733472972396-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:13,244 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42321,1733472972396-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:13,244 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42321,1733472972396-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:13,244 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b6b797fc3981:42321, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:13,244 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T08:16:13,244 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. 
Creating... 2024-12-06T08:16:13,245 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:16:13,246 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T08:16:13,246 DEBUG [master/b6b797fc3981:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T08:16:13,247 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:16:13,247 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:13,247 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:16:13,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:16:13,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:16:13,256 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 14018a782b624965e782ac68a947637a, NAME => 'hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418 2024-12-06T08:16:13,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:16:13,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:16:13,262 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:16:13,262 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 14018a782b624965e782ac68a947637a, disabling compactions & flushes 2024-12-06T08:16:13,262 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. 2024-12-06T08:16:13,262 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. 2024-12-06T08:16:13,262 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. after waiting 0 ms 2024-12-06T08:16:13,262 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. 2024-12-06T08:16:13,262 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. 2024-12-06T08:16:13,262 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 14018a782b624965e782ac68a947637a: 2024-12-06T08:16:13,264 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:16:13,264 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733472973264"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733472973264"}]},"ts":"1733472973264"} 2024-12-06T08:16:13,266 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T08:16:13,267 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:16:13,267 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472973267"}]},"ts":"1733472973267"} 2024-12-06T08:16:13,268 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T08:16:13,273 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=14018a782b624965e782ac68a947637a, ASSIGN}] 2024-12-06T08:16:13,274 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=14018a782b624965e782ac68a947637a, ASSIGN 2024-12-06T08:16:13,275 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=14018a782b624965e782ac68a947637a, ASSIGN; state=OFFLINE, location=b6b797fc3981,41981,1733472972440; forceNewPlan=false, retain=false 2024-12-06T08:16:13,425 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=14018a782b624965e782ac68a947637a, regionState=OPENING, regionLocation=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:13,427 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 14018a782b624965e782ac68a947637a, server=b6b797fc3981,41981,1733472972440}] 2024-12-06T08:16:13,579 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,41981,1733472972440 2024-12-06T08:16:13,583 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. 2024-12-06T08:16:13,583 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 14018a782b624965e782ac68a947637a, NAME => 'hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:16:13,583 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 14018a782b624965e782ac68a947637a 2024-12-06T08:16:13,584 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:16:13,584 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 14018a782b624965e782ac68a947637a 2024-12-06T08:16:13,584 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 14018a782b624965e782ac68a947637a 2024-12-06T08:16:13,585 INFO [StoreOpener-14018a782b624965e782ac68a947637a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 14018a782b624965e782ac68a947637a 2024-12-06T08:16:13,586 INFO [StoreOpener-14018a782b624965e782ac68a947637a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14018a782b624965e782ac68a947637a columnFamilyName info 2024-12-06T08:16:13,586 DEBUG [StoreOpener-14018a782b624965e782ac68a947637a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:13,587 INFO [StoreOpener-14018a782b624965e782ac68a947637a-1 {}] regionserver.HStore(327): Store=14018a782b624965e782ac68a947637a/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:16:13,588 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/namespace/14018a782b624965e782ac68a947637a 2024-12-06T08:16:13,588 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/namespace/14018a782b624965e782ac68a947637a 2024-12-06T08:16:13,590 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 14018a782b624965e782ac68a947637a 2024-12-06T08:16:13,592 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/namespace/14018a782b624965e782ac68a947637a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:16:13,592 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 14018a782b624965e782ac68a947637a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=765208, jitterRate=-0.026988178491592407}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:16:13,593 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 14018a782b624965e782ac68a947637a: 2024-12-06T08:16:13,594 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a., pid=6, masterSystemTime=1733472973579 2024-12-06T08:16:13,596 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. 2024-12-06T08:16:13,596 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. 
2024-12-06T08:16:13,596 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=14018a782b624965e782ac68a947637a, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:13,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T08:16:13,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 14018a782b624965e782ac68a947637a, server=b6b797fc3981,41981,1733472972440 in 171 msec 2024-12-06T08:16:13,601 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T08:16:13,601 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=14018a782b624965e782ac68a947637a, ASSIGN in 326 msec 2024-12-06T08:16:13,602 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:16:13,602 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472973602"}]},"ts":"1733472973602"} 2024-12-06T08:16:13,603 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T08:16:13,606 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:16:13,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 361 msec 2024-12-06T08:16:13,647 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T08:16:13,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:16:13,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:13,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:16:13,653 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T08:16:13,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:16:13,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 9 msec 2024-12-06T08:16:13,664 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T08:16:13,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:16:13,673 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 9 msec 2024-12-06T08:16:13,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T08:16:13,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T08:16:13,680 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.214sec 2024-12-06T08:16:13,680 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T08:16:13,680 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T08:16:13,680 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T08:16:13,680 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T08:16:13,680 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T08:16:13,680 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42321,1733472972396-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:16:13,680 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42321,1733472972396-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T08:16:13,682 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T08:16:13,682 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T08:16:13,682 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,42321,1733472972396-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T08:16:13,761 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x314c3bcb to 127.0.0.1:62431 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2494f524 2024-12-06T08:16:13,764 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@292b58e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:16:13,766 DEBUG [hconnection-0x7086c45a-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:16:13,768 INFO [RS-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53700, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:16:13,769 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b6b797fc3981,42321,1733472972396 2024-12-06T08:16:13,769 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:16:13,771 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-06T08:16:13,772 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-06T08:16:13,775 INFO [RS-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49700, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-06T08:16:13,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42321 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-06T08:16:13,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42321 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-06T08:16:13,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42321 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:16:13,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42321 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-06T08:16:13,778 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:16:13,778 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:13,778 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42321 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 9 2024-12-06T08:16:13,779 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:16:13,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42321 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:16:13,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741837_1013 (size=381) 2024-12-06T08:16:13,787 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 68425bdac859c5f4533d3ba294e450ac, NAME => 'TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418 2024-12-06T08:16:13,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741837_1013 (size=381) 2024-12-06T08:16:13,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741838_1014 (size=64) 2024-12-06T08:16:13,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741838_1014 (size=64) 2024-12-06T08:16:13,794 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(894): Instantiated 
TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:16:13,794 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1681): Closing 68425bdac859c5f4533d3ba294e450ac, disabling compactions & flushes 2024-12-06T08:16:13,794 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 2024-12-06T08:16:13,794 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 2024-12-06T08:16:13,794 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. after waiting 0 ms 2024-12-06T08:16:13,794 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 2024-12-06T08:16:13,794 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 2024-12-06T08:16:13,794 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1635): Region close journal for 68425bdac859c5f4533d3ba294e450ac: 2024-12-06T08:16:13,795 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:16:13,795 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733472973795"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733472973795"}]},"ts":"1733472973795"} 2024-12-06T08:16:13,797 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-06T08:16:13,798 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:16:13,798 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472973798"}]},"ts":"1733472973798"} 2024-12-06T08:16:13,799 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-06T08:16:13,802 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=68425bdac859c5f4533d3ba294e450ac, ASSIGN}] 2024-12-06T08:16:13,803 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=68425bdac859c5f4533d3ba294e450ac, ASSIGN 2024-12-06T08:16:13,804 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=68425bdac859c5f4533d3ba294e450ac, ASSIGN; state=OFFLINE, location=b6b797fc3981,41981,1733472972440; forceNewPlan=false, retain=false 2024-12-06T08:16:13,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:13,955 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=68425bdac859c5f4533d3ba294e450ac, regionState=OPENING, regionLocation=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:13,956 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 68425bdac859c5f4533d3ba294e450ac, server=b6b797fc3981,41981,1733472972440}] 2024-12-06T08:16:14,109 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to b6b797fc3981,41981,1733472972440 2024-12-06T08:16:14,112 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 2024-12-06T08:16:14,113 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 68425bdac859c5f4533d3ba294e450ac, NAME => 'TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:16:14,113 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:14,113 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:16:14,113 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:14,113 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:14,114 INFO [StoreOpener-68425bdac859c5f4533d3ba294e450ac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:14,116 INFO [StoreOpener-68425bdac859c5f4533d3ba294e450ac-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, 
compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 68425bdac859c5f4533d3ba294e450ac columnFamilyName info 2024-12-06T08:16:14,116 DEBUG [StoreOpener-68425bdac859c5f4533d3ba294e450ac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:14,116 INFO [StoreOpener-68425bdac859c5f4533d3ba294e450ac-1 {}] regionserver.HStore(327): Store=68425bdac859c5f4533d3ba294e450ac/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:16:14,117 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:14,117 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:14,119 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:14,121 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:16:14,121 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 68425bdac859c5f4533d3ba294e450ac; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779746, jitterRate=-0.008502334356307983}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:16:14,122 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 68425bdac859c5f4533d3ba294e450ac: 2024-12-06T08:16:14,123 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac., pid=11, masterSystemTime=1733472974108 2024-12-06T08:16:14,124 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 2024-12-06T08:16:14,125 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 
2024-12-06T08:16:14,125 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=68425bdac859c5f4533d3ba294e450ac, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:14,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-06T08:16:14,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 68425bdac859c5f4533d3ba294e450ac, server=b6b797fc3981,41981,1733472972440 in 171 msec 2024-12-06T08:16:14,131 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-06T08:16:14,131 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=68425bdac859c5f4533d3ba294e450ac, ASSIGN in 327 msec 2024-12-06T08:16:14,131 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:16:14,131 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733472974131"}]},"ts":"1733472974131"} 2024-12-06T08:16:14,133 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-06T08:16:14,135 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:16:14,137 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestLogRolling-testLogRolling in 359 msec 2024-12-06T08:16:14,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:15,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:16,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,101 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,101 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,104 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,104 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,105 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,611 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T08:16:16,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,633 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,633 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,635 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:16,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:17,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:18,685 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-06T08:16:18,685 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-06T08:16:18,686 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-06T08:16:18,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:19,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:20,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:21,274 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-06T08:16:21,274 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-06T08:16:21,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-06T08:16:21,275 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-06T08:16:21,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:22,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:23,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42321 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-06T08:16:23,780 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling, procId: 9 completed 2024-12-06T08:16:23,783 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-06T08:16:23,783 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 
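At this point the client sees the CREATE operation for procId 9 complete and the test begins writing rows (row0001, row0008, row0031 in the flushes that follow). Because the region is configured with a very small memstore for this test, some writes are rejected with RegionTooBusyException ("Over memstore limit=32.0 K"), as logged below, and the caller is expected to back off and retry. The following is a minimal, hypothetical sketch of such a write loop against the plain client API: the table name, family, and row-key format mirror the log, while the qualifier, value, row count, and retry policy are invented for illustration. Note that the real HBase client also retries internally, so in practice this exception may only surface wrapped after the client's own retries are exhausted.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class WriteWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      for (int i = 1; i <= 40; i++) {
        // Row keys like "row0001", matching the keys visible in the flush entries below.
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        // Qualifier "q" and the 1 KB value are illustrative; the test's actual payload differs.
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), new byte[1024]);
        putWithRetry(table, put);
      }
    }
  }

  // Retry a single Put when the region reports it is over its memstore limit.
  static void putWithRetry(Table table, Put put) throws IOException, InterruptedException {
    for (int attempt = 0; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= 5) {
          throw e; // give up after a few attempts
        }
        Thread.sleep(100L << attempt); // simple exponential backoff while the memstore drains
      }
    }
  }
}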
2024-12-06T08:16:23,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on 68425bdac859c5f4533d3ba294e450ac
2024-12-06T08:16:23,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 68425bdac859c5f4533d3ba294e450ac 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-06T08:16:23,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/50d2c9b97928426e9c9f00d46292c200 is 1080, key is row0001/info:/1733472983786/Put/seqid=0
2024-12-06T08:16:23,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68425bdac859c5f4533d3ba294e450ac, server=b6b797fc3981,41981,1733472972440
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-06T08:16:23,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53700 deadline: 1733472993816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68425bdac859c5f4533d3ba294e450ac, server=b6b797fc3981,41981,1733472972440
2024-12-06T08:16:23,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741839_1015 (size=12509)
2024-12-06T08:16:23,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741839_1015 (size=12509)
2024-12-06T08:16:23,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/50d2c9b97928426e9c9f00d46292c200
2024-12-06T08:16:23,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing
hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/50d2c9b97928426e9c9f00d46292c200 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/50d2c9b97928426e9c9f00d46292c200 2024-12-06T08:16:23,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/50d2c9b97928426e9c9f00d46292c200, entries=7, sequenceid=11, filesize=12.2 K 2024-12-06T08:16:23,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 68425bdac859c5f4533d3ba294e450ac in 42ms, sequenceid=11, compaction requested=false 2024-12-06T08:16:23,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 68425bdac859c5f4533d3ba294e450ac: 2024-12-06T08:16:23,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:24,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:25,713 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1fe245b8d8375ab9b801a570a1b2b5e8, had cached 0 bytes from a total of 23930 2024-12-06T08:16:25,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:26,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:27,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:28,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:29,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:30,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:31,592 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=1, created chunk count=15, reused chunk count=36, reuseRatio=70.59% 2024-12-06T08:16:31,592 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-06T08:16:31,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:32,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:33,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-06T08:16:33,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on 68425bdac859c5f4533d3ba294e450ac
2024-12-06T08:16:33,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 68425bdac859c5f4533d3ba294e450ac 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-06T08:16:33,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/6adaea2a51b4468e844b6b8103de0329 is 1080, key is row0008/info:/1733472983795/Put/seqid=0
2024-12-06T08:16:33,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741840_1016 (size=29761)
2024-12-06T08:16:33,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741840_1016 (size=29761)
2024-12-06T08:16:33,932 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/6adaea2a51b4468e844b6b8103de0329
2024-12-06T08:16:33,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/6adaea2a51b4468e844b6b8103de0329 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/6adaea2a51b4468e844b6b8103de0329
2024-12-06T08:16:33,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/6adaea2a51b4468e844b6b8103de0329, entries=23, sequenceid=37, filesize=29.1 K
2024-12-06T08:16:33,945 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 68425bdac859c5f4533d3ba294e450ac in 26ms, sequenceid=37, compaction requested=false
2024-12-06T08:16:33,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 68425bdac859c5f4533d3ba294e450ac:
2024-12-06T08:16:33,945 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=41.3 K, sizeToCheck=16.0 K
2024-12-06T08:16:33,945 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-06T08:16:33,945 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/6adaea2a51b4468e844b6b8103de0329 because midkey is the same as first or last row
2024-12-06T08:16:34,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for
hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:35,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:35,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:35,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 68425bdac859c5f4533d3ba294e450ac 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:16:35,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/e4b86b911e454c8496d02d420b7af310 is 1080, key is row0031/info:/1733472993919/Put/seqid=0 2024-12-06T08:16:35,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741841_1017 (size=12509) 2024-12-06T08:16:35,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741841_1017 (size=12509) 2024-12-06T08:16:35,942 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/e4b86b911e454c8496d02d420b7af310 2024-12-06T08:16:35,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/e4b86b911e454c8496d02d420b7af310 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/e4b86b911e454c8496d02d420b7af310 2024-12-06T08:16:35,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68425bdac859c5f4533d3ba294e450ac, server=b6b797fc3981,41981,1733472972440 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:16:35,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53700 deadline: 1733473005952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68425bdac859c5f4533d3ba294e450ac, server=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:35,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/e4b86b911e454c8496d02d420b7af310, entries=7, sequenceid=47, filesize=12.2 K 2024-12-06T08:16:35,956 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 68425bdac859c5f4533d3ba294e450ac in 29ms, sequenceid=47, compaction requested=true 2024-12-06T08:16:35,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 68425bdac859c5f4533d3ba294e450ac: 2024-12-06T08:16:35,957 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=53.5 K, sizeToCheck=16.0 K 2024-12-06T08:16:35,957 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T08:16:35,957 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/6adaea2a51b4468e844b6b8103de0329 because midkey is the same as first or last row 2024-12-06T08:16:35,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 68425bdac859c5f4533d3ba294e450ac:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:16:35,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:16:35,957 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:16:35,958 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:16:35,958 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1540): 68425bdac859c5f4533d3ba294e450ac/info is initiating minor compaction (all files) 2024-12-06T08:16:35,958 INFO 
[RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 68425bdac859c5f4533d3ba294e450ac/info in TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 2024-12-06T08:16:35,958 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/50d2c9b97928426e9c9f00d46292c200, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/6adaea2a51b4468e844b6b8103de0329, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/e4b86b911e454c8496d02d420b7af310] into tmpdir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp, totalSize=53.5 K 2024-12-06T08:16:35,959 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50d2c9b97928426e9c9f00d46292c200, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733472983786 2024-12-06T08:16:35,959 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6adaea2a51b4468e844b6b8103de0329, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733472983795 2024-12-06T08:16:35,959 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4b86b911e454c8496d02d420b7af310, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733472993919 2024-12-06T08:16:35,979 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 68425bdac859c5f4533d3ba294e450ac#info#compaction#41 average throughput is 9.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:16:35,980 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/8c9c687afc6f4ecd9b83facb691ccb68 is 1080, key is row0001/info:/1733472983786/Put/seqid=0 2024-12-06T08:16:35,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741842_1018 (size=44978) 2024-12-06T08:16:35,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741842_1018 (size=44978) 2024-12-06T08:16:35,992 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/8c9c687afc6f4ecd9b83facb691ccb68 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/8c9c687afc6f4ecd9b83facb691ccb68 2024-12-06T08:16:35,998 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 68425bdac859c5f4533d3ba294e450ac/info of 68425bdac859c5f4533d3ba294e450ac into 8c9c687afc6f4ecd9b83facb691ccb68(size=43.9 K), total size for store is 43.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:16:35,998 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 68425bdac859c5f4533d3ba294e450ac: 2024-12-06T08:16:35,998 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac., storeName=68425bdac859c5f4533d3ba294e450ac/info, priority=13, startTime=1733472995957; duration=0sec 2024-12-06T08:16:35,998 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=43.9 K, sizeToCheck=16.0 K 2024-12-06T08:16:35,998 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T08:16:35,998 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/8c9c687afc6f4ecd9b83facb691ccb68 because midkey is the same as first or last row 2024-12-06T08:16:35,998 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:16:35,998 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 68425bdac859c5f4533d3ba294e450ac:info 2024-12-06T08:16:36,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:37,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:38,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,199 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,199 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,199 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,214 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,215 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,727 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T08:16:38,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,746 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,746 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,747 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,747 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,747 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,747 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,750 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,753 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:38,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:39,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:40,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:41,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:42,379 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:16:42,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:43,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:44,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:45,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:46,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:46,045 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 68425bdac859c5f4533d3ba294e450ac 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-06T08:16:46,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/f8baadd44bfe4b3e812358b918c1a39d is 1080, key is row0038/info:/1733472995927/Put/seqid=0 2024-12-06T08:16:46,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741843_1019 (size=29761) 2024-12-06T08:16:46,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741843_1019 (size=29761) 2024-12-06T08:16:46,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/f8baadd44bfe4b3e812358b918c1a39d 2024-12-06T08:16:46,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/f8baadd44bfe4b3e812358b918c1a39d as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f8baadd44bfe4b3e812358b918c1a39d 2024-12-06T08:16:46,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f8baadd44bfe4b3e812358b918c1a39d, entries=23, sequenceid=74, filesize=29.1 K 2024-12-06T08:16:46,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=4.20 KB/4304 for 68425bdac859c5f4533d3ba294e450ac in 24ms, sequenceid=74, compaction requested=false 2024-12-06T08:16:46,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
68425bdac859c5f4533d3ba294e450ac: 2024-12-06T08:16:46,069 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=73.0 K, sizeToCheck=16.0 K 2024-12-06T08:16:46,069 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T08:16:46,069 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/8c9c687afc6f4ecd9b83facb691ccb68 because midkey is the same as first or last row 2024-12-06T08:16:46,675 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:16:46,677 INFO [RS-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38440, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:16:46,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:47,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:48,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:48,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 68425bdac859c5f4533d3ba294e450ac 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:16:48,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/53f36f7cb7b748d3ae9edbac7f30fc9e is 1080, key is row0061/info:/1733473006045/Put/seqid=0 2024-12-06T08:16:48,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741844_1020 (size=12509) 2024-12-06T08:16:48,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741844_1020 (size=12509) 2024-12-06T08:16:48,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/53f36f7cb7b748d3ae9edbac7f30fc9e 2024-12-06T08:16:48,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/53f36f7cb7b748d3ae9edbac7f30fc9e as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/53f36f7cb7b748d3ae9edbac7f30fc9e 2024-12-06T08:16:48,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68425bdac859c5f4533d3ba294e450ac, server=b6b797fc3981,41981,1733472972440 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:16:48,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53700 deadline: 1733473018079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=68425bdac859c5f4533d3ba294e450ac, server=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:48,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/53f36f7cb7b748d3ae9edbac7f30fc9e, entries=7, sequenceid=84, filesize=12.2 K 2024-12-06T08:16:48,084 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 68425bdac859c5f4533d3ba294e450ac in 30ms, sequenceid=84, compaction requested=true 2024-12-06T08:16:48,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 68425bdac859c5f4533d3ba294e450ac: 2024-12-06T08:16:48,084 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=85.2 K, sizeToCheck=16.0 K 2024-12-06T08:16:48,084 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T08:16:48,084 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/8c9c687afc6f4ecd9b83facb691ccb68 because midkey is the same as first or last row 2024-12-06T08:16:48,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 68425bdac859c5f4533d3ba294e450ac:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:16:48,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:16:48,085 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:16:48,085 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 87248 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:16:48,086 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1540): 68425bdac859c5f4533d3ba294e450ac/info is initiating minor compaction (all files) 2024-12-06T08:16:48,086 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 
68425bdac859c5f4533d3ba294e450ac/info in TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 2024-12-06T08:16:48,086 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/8c9c687afc6f4ecd9b83facb691ccb68, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f8baadd44bfe4b3e812358b918c1a39d, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/53f36f7cb7b748d3ae9edbac7f30fc9e] into tmpdir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp, totalSize=85.2 K 2024-12-06T08:16:48,086 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c9c687afc6f4ecd9b83facb691ccb68, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733472983786 2024-12-06T08:16:48,086 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8baadd44bfe4b3e812358b918c1a39d, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733472995927 2024-12-06T08:16:48,087 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53f36f7cb7b748d3ae9edbac7f30fc9e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1733473006045 2024-12-06T08:16:48,101 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 68425bdac859c5f4533d3ba294e450ac#info#compaction#44 average throughput is 17.19 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:16:48,101 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/f0786242974c4c07837517d883be292a is 1080, key is row0001/info:/1733472983786/Put/seqid=0 2024-12-06T08:16:48,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741845_1021 (size=77532) 2024-12-06T08:16:48,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741845_1021 (size=77532) 2024-12-06T08:16:48,111 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/f0786242974c4c07837517d883be292a as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f0786242974c4c07837517d883be292a 2024-12-06T08:16:48,116 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 68425bdac859c5f4533d3ba294e450ac/info of 68425bdac859c5f4533d3ba294e450ac into f0786242974c4c07837517d883be292a(size=75.7 K), total size for store is 75.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:16:48,116 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 68425bdac859c5f4533d3ba294e450ac: 2024-12-06T08:16:48,116 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac., storeName=68425bdac859c5f4533d3ba294e450ac/info, priority=13, startTime=1733473008084; duration=0sec 2024-12-06T08:16:48,116 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(109): Should split because info size=75.7 K, sizeToCheck=16.0 K 2024-12-06T08:16:48,116 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-06T08:16:48,117 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:16:48,118 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:16:48,118 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 68425bdac859c5f4533d3ba294e450ac:info 2024-12-06T08:16:48,119 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42321 {}] assignment.AssignmentManager(1346): Split request from b6b797fc3981,41981,1733472972440, parent={ENCODED => 68425bdac859c5f4533d3ba294e450ac, NAME => 
'TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-06T08:16:48,123 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42321 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:48,127 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42321 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=68425bdac859c5f4533d3ba294e450ac, daughterA=6be54277077587710f4f7bf99a76fc14, daughterB=a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:16:48,128 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=68425bdac859c5f4533d3ba294e450ac, daughterA=6be54277077587710f4f7bf99a76fc14, daughterB=a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:16:48,128 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=68425bdac859c5f4533d3ba294e450ac, daughterA=6be54277077587710f4f7bf99a76fc14, daughterB=a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:16:48,128 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=12, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=68425bdac859c5f4533d3ba294e450ac, daughterA=6be54277077587710f4f7bf99a76fc14, daughterB=a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:16:48,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=68425bdac859c5f4533d3ba294e450ac, UNASSIGN}] 2024-12-06T08:16:48,136 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=68425bdac859c5f4533d3ba294e450ac, UNASSIGN 2024-12-06T08:16:48,137 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=68425bdac859c5f4533d3ba294e450ac, regionState=CLOSING, regionLocation=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:48,138 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42321 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=CLOSING, location=b6b797fc3981,41981,1733472972440, table=TestLogRolling-testLogRolling, region=68425bdac859c5f4533d3ba294e450ac. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-06T08:16:48,139 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-06T08:16:48,139 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE; CloseRegionProcedure 68425bdac859c5f4533d3ba294e450ac, server=b6b797fc3981,41981,1733472972440}] 2024-12-06T08:16:48,295 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,41981,1733472972440 2024-12-06T08:16:48,297 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(124): Close 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:48,297 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-06T08:16:48,298 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1681): Closing 68425bdac859c5f4533d3ba294e450ac, disabling compactions & flushes 2024-12-06T08:16:48,298 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 2024-12-06T08:16:48,298 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 2024-12-06T08:16:48,298 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. after waiting 0 ms 2024-12-06T08:16:48,298 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 
2024-12-06T08:16:48,298 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(2837): Flushing 68425bdac859c5f4533d3ba294e450ac 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-12-06T08:16:48,303 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/982a711d5eb74412919c31f72ef83c2f is 1080, key is row0068/info:/1733473008055/Put/seqid=0 2024-12-06T08:16:48,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741846_1022 (size=28684) 2024-12-06T08:16:48,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741846_1022 (size=28684) 2024-12-06T08:16:48,309 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/982a711d5eb74412919c31f72ef83c2f 2024-12-06T08:16:48,315 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/.tmp/info/982a711d5eb74412919c31f72ef83c2f as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/982a711d5eb74412919c31f72ef83c2f 2024-12-06T08:16:48,320 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/982a711d5eb74412919c31f72ef83c2f, entries=22, sequenceid=110, filesize=28.0 K 2024-12-06T08:16:48,321 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(3040): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=0 B/0 for 68425bdac859c5f4533d3ba294e450ac in 23ms, sequenceid=110, compaction requested=false 2024-12-06T08:16:48,322 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/50d2c9b97928426e9c9f00d46292c200, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/6adaea2a51b4468e844b6b8103de0329, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/8c9c687afc6f4ecd9b83facb691ccb68, 
hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/e4b86b911e454c8496d02d420b7af310, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f8baadd44bfe4b3e812358b918c1a39d, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/53f36f7cb7b748d3ae9edbac7f30fc9e] to archive 2024-12-06T08:16:48,323 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T08:16:48,325 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/50d2c9b97928426e9c9f00d46292c200 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/50d2c9b97928426e9c9f00d46292c200 2024-12-06T08:16:48,326 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/6adaea2a51b4468e844b6b8103de0329 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/6adaea2a51b4468e844b6b8103de0329 2024-12-06T08:16:48,327 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/8c9c687afc6f4ecd9b83facb691ccb68 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/8c9c687afc6f4ecd9b83facb691ccb68 2024-12-06T08:16:48,329 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/e4b86b911e454c8496d02d420b7af310 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/e4b86b911e454c8496d02d420b7af310 2024-12-06T08:16:48,330 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f8baadd44bfe4b3e812358b918c1a39d to 
hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f8baadd44bfe4b3e812358b918c1a39d 2024-12-06T08:16:48,331 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/53f36f7cb7b748d3ae9edbac7f30fc9e to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/53f36f7cb7b748d3ae9edbac7f30fc9e 2024-12-06T08:16:48,335 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/recovered.edits/113.seqid, newMaxSeqId=113, maxSeqId=1 2024-12-06T08:16:48,336 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. 2024-12-06T08:16:48,336 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] regionserver.HRegion(1635): Region close journal for 68425bdac859c5f4533d3ba294e450ac: 2024-12-06T08:16:48,338 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION, pid=14}] handler.UnassignRegionHandler(170): Closed 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:48,338 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=68425bdac859c5f4533d3ba294e450ac, regionState=CLOSED 2024-12-06T08:16:48,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=13 2024-12-06T08:16:48,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=13, state=SUCCESS; CloseRegionProcedure 68425bdac859c5f4533d3ba294e450ac, server=b6b797fc3981,41981,1733472972440 in 201 msec 2024-12-06T08:16:48,343 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-06T08:16:48,343 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=68425bdac859c5f4533d3ba294e450ac, UNASSIGN in 207 msec 2024-12-06T08:16:48,364 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:48,365 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=12 splitting 2 storefiles, region=68425bdac859c5f4533d3ba294e450ac, threads=2 2024-12-06T08:16:48,366 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/982a711d5eb74412919c31f72ef83c2f for region: 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:48,366 DEBUG [StoreFileSplitter-pool-1 {}] 
assignment.SplitTableRegionProcedure(823): pid=12 splitting started for store file: hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f0786242974c4c07837517d883be292a for region: 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:48,375 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/982a711d5eb74412919c31f72ef83c2f, top=true 2024-12-06T08:16:48,379 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/TestLogRolling-testLogRolling=68425bdac859c5f4533d3ba294e450ac-982a711d5eb74412919c31f72ef83c2f for child: a8a53d5d20d3229597d6b3fca475c552, parent: 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:48,380 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/982a711d5eb74412919c31f72ef83c2f for region: 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:48,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741847_1023 (size=27) 2024-12-06T08:16:48,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741847_1023 (size=27) 2024-12-06T08:16:48,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741848_1024 (size=27) 2024-12-06T08:16:48,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741848_1024 (size=27) 2024-12-06T08:16:48,390 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=12 splitting complete for store file: hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f0786242974c4c07837517d883be292a for region: 68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:16:48,390 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=12 split storefiles for region 68425bdac859c5f4533d3ba294e450ac Daughter A: [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14/info/f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac] storefiles, Daughter B: [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/TestLogRolling-testLogRolling=68425bdac859c5f4533d3ba294e450ac-982a711d5eb74412919c31f72ef83c2f, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac] storefiles. 
2024-12-06T08:16:48,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741849_1025 (size=71) 2024-12-06T08:16:48,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741849_1025 (size=71) 2024-12-06T08:16:48,400 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:48,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741850_1026 (size=71) 2024-12-06T08:16:48,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741850_1026 (size=71) 2024-12-06T08:16:48,412 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:48,422 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14/recovered.edits/113.seqid, newMaxSeqId=113, maxSeqId=-1 2024-12-06T08:16:48,424 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/recovered.edits/113.seqid, newMaxSeqId=113, maxSeqId=-1 2024-12-06T08:16:48,426 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733473008426"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733473008426"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733473008426"}]},"ts":"1733473008426"} 2024-12-06T08:16:48,426 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733473008426"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733473008426"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733473008426"}]},"ts":"1733473008426"} 2024-12-06T08:16:48,426 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733473008426"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733473008426"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733473008426"}]},"ts":"1733473008426"} 2024-12-06T08:16:48,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41981 {}] regionserver.HRegion(8581): Flush requested on 1588230740 2024-12-06T08:16:48,456 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-06T08:16:48,457 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=4.75 KB heapSize=8.29 KB 2024-12-06T08:16:48,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6be54277077587710f4f7bf99a76fc14, ASSIGN}, {pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a8a53d5d20d3229597d6b3fca475c552, ASSIGN}] 2024-12-06T08:16:48,461 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6be54277077587710f4f7bf99a76fc14, ASSIGN 2024-12-06T08:16:48,462 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a8a53d5d20d3229597d6b3fca475c552, ASSIGN 2024-12-06T08:16:48,462 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=15, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6be54277077587710f4f7bf99a76fc14, ASSIGN; state=SPLITTING_NEW, location=b6b797fc3981,41981,1733472972440; forceNewPlan=false, retain=false 2024-12-06T08:16:48,462 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=16, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a8a53d5d20d3229597d6b3fca475c552, ASSIGN; state=SPLITTING_NEW, location=b6b797fc3981,41981,1733472972440; forceNewPlan=false, retain=false 2024-12-06T08:16:48,473 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/.tmp/info/317c65fdfc6e40959ad1db1ec7321ce2 is 193, key is TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552./info:regioninfo/1733473008426/Put/seqid=0 2024-12-06T08:16:48,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741851_1027 (size=9423) 2024-12-06T08:16:48,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741851_1027 (size=9423) 2024-12-06T08:16:48,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.54 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/.tmp/info/317c65fdfc6e40959ad1db1ec7321ce2 2024-12-06T08:16:48,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/.tmp/table/cca245b13a83436a8388cf4bfffe4f13 is 65, key is TestLogRolling-testLogRolling/table:state/1733472974131/Put/seqid=0 2024-12-06T08:16:48,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33427 is added to blk_1073741852_1028 (size=5412) 2024-12-06T08:16:48,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741852_1028 (size=5412) 2024-12-06T08:16:48,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=216 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/.tmp/table/cca245b13a83436a8388cf4bfffe4f13 2024-12-06T08:16:48,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/.tmp/info/317c65fdfc6e40959ad1db1ec7321ce2 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/info/317c65fdfc6e40959ad1db1ec7321ce2 2024-12-06T08:16:48,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/info/317c65fdfc6e40959ad1db1ec7321ce2, entries=29, sequenceid=17, filesize=9.2 K 2024-12-06T08:16:48,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/.tmp/table/cca245b13a83436a8388cf4bfffe4f13 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/table/cca245b13a83436a8388cf4bfffe4f13 2024-12-06T08:16:48,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/table/cca245b13a83436a8388cf4bfffe4f13, entries=4, sequenceid=17, filesize=5.3 K 2024-12-06T08:16:48,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~4.75 KB/4869, heapSize ~8.01 KB/8200, currentSize=0 B/0 for 1588230740 in 66ms, sequenceid=17, compaction requested=false 2024-12-06T08:16:48,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-06T08:16:48,613 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=a8a53d5d20d3229597d6b3fca475c552, regionState=OPENING, regionLocation=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:48,613 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=6be54277077587710f4f7bf99a76fc14, regionState=OPENING, regionLocation=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:48,615 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; OpenRegionProcedure a8a53d5d20d3229597d6b3fca475c552, server=b6b797fc3981,41981,1733472972440}] 2024-12-06T08:16:48,616 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=15, state=RUNNABLE; OpenRegionProcedure 6be54277077587710f4f7bf99a76fc14, server=b6b797fc3981,41981,1733472972440}] 2024-12-06T08:16:48,768 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,41981,1733472972440 2024-12-06T08:16:48,771 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(135): Open 
TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14. 2024-12-06T08:16:48,772 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7285): Opening region: {ENCODED => 6be54277077587710f4f7bf99a76fc14, NAME => 'TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-06T08:16:48,772 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 6be54277077587710f4f7bf99a76fc14 2024-12-06T08:16:48,772 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:16:48,772 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7327): checking encryption for 6be54277077587710f4f7bf99a76fc14 2024-12-06T08:16:48,772 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(7330): checking classloading for 6be54277077587710f4f7bf99a76fc14 2024-12-06T08:16:48,773 INFO [StoreOpener-6be54277077587710f4f7bf99a76fc14-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6be54277077587710f4f7bf99a76fc14 2024-12-06T08:16:48,774 INFO [StoreOpener-6be54277077587710f4f7bf99a76fc14-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6be54277077587710f4f7bf99a76fc14 columnFamilyName info 2024-12-06T08:16:48,774 DEBUG [StoreOpener-6be54277077587710f4f7bf99a76fc14-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:48,786 DEBUG [StoreOpener-6be54277077587710f4f7bf99a76fc14-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14/info/f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac->hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f0786242974c4c07837517d883be292a-bottom 2024-12-06T08:16:48,786 INFO [StoreOpener-6be54277077587710f4f7bf99a76fc14-1 {}] regionserver.HStore(327): Store=6be54277077587710f4f7bf99a76fc14/info, 
memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:16:48,787 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14 2024-12-06T08:16:48,788 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14 2024-12-06T08:16:48,790 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1085): writing seq id for 6be54277077587710f4f7bf99a76fc14 2024-12-06T08:16:48,790 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1102): Opened 6be54277077587710f4f7bf99a76fc14; next sequenceid=114; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=867345, jitterRate=0.10288681089878082}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:16:48,791 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegion(1001): Region open journal for 6be54277077587710f4f7bf99a76fc14: 2024-12-06T08:16:48,792 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14., pid=18, masterSystemTime=1733473008768 2024-12-06T08:16:48,792 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(403): Add compact mark for store 6be54277077587710f4f7bf99a76fc14:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:16:48,792 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:16:48,792 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-06T08:16:48,793 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14. 2024-12-06T08:16:48,793 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1540): 6be54277077587710f4f7bf99a76fc14/info is initiating minor compaction (all files) 2024-12-06T08:16:48,793 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6be54277077587710f4f7bf99a76fc14/info in TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14. 
2024-12-06T08:16:48,793 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14/info/f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac->hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f0786242974c4c07837517d883be292a-bottom] into tmpdir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14/.tmp, totalSize=75.7 K 2024-12-06T08:16:48,793 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1733472983786 2024-12-06T08:16:48,794 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14. 2024-12-06T08:16:48,794 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=18}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14. 2024-12-06T08:16:48,794 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(135): Open TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 
2024-12-06T08:16:48,794 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7285): Opening region: {ENCODED => a8a53d5d20d3229597d6b3fca475c552, NAME => 'TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-06T08:16:48,794 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=15 updating hbase:meta row=6be54277077587710f4f7bf99a76fc14, regionState=OPEN, openSeqNum=114, regionLocation=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:48,794 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:16:48,794 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(894): Instantiated TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:16:48,795 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7327): checking encryption for a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:16:48,795 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7330): checking classloading for a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:16:48,796 INFO [StoreOpener-a8a53d5d20d3229597d6b3fca475c552-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:16:48,797 INFO [StoreOpener-a8a53d5d20d3229597d6b3fca475c552-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a8a53d5d20d3229597d6b3fca475c552 columnFamilyName info 2024-12-06T08:16:48,797 DEBUG [StoreOpener-a8a53d5d20d3229597d6b3fca475c552-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:16:48,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=15 2024-12-06T08:16:48,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=15, state=SUCCESS; OpenRegionProcedure 6be54277077587710f4f7bf99a76fc14, server=b6b797fc3981,41981,1733472972440 in 180 msec 2024-12-06T08:16:48,800 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=12, state=SUCCESS; TransitRegionStateProcedure 
table=TestLogRolling-testLogRolling, region=6be54277077587710f4f7bf99a76fc14, ASSIGN in 337 msec 2024-12-06T08:16:48,810 DEBUG [StoreOpener-a8a53d5d20d3229597d6b3fca475c552-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/TestLogRolling-testLogRolling=68425bdac859c5f4533d3ba294e450ac-982a711d5eb74412919c31f72ef83c2f 2024-12-06T08:16:48,815 DEBUG [StoreOpener-a8a53d5d20d3229597d6b3fca475c552-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac->hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f0786242974c4c07837517d883be292a-top 2024-12-06T08:16:48,815 INFO [StoreOpener-a8a53d5d20d3229597d6b3fca475c552-1 {}] regionserver.HStore(327): Store=a8a53d5d20d3229597d6b3fca475c552/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:16:48,816 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:16:48,818 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:16:48,820 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6be54277077587710f4f7bf99a76fc14#info#compaction#48 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:16:48,821 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14/.tmp/info/7f937cd46ce54c01a9714e3e58f4935e is 1080, key is row0001/info:/1733472983786/Put/seqid=0 2024-12-06T08:16:48,821 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1085): writing seq id for a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:16:48,822 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1102): Opened a8a53d5d20d3229597d6b3fca475c552; next sequenceid=114; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805391, jitterRate=0.024108558893203735}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:16:48,822 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1001): Region open journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:16:48,823 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2601): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552., pid=17, masterSystemTime=1733473008768 2024-12-06T08:16:48,823 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(403): Add compact mark for store a8a53d5d20d3229597d6b3fca475c552:info, priority=-2147483648, current under compaction store size is 2 2024-12-06T08:16:48,823 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:16:48,823 DEBUG [RS:0;b6b797fc3981:41981-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-06T08:16:48,824 INFO [RS:0;b6b797fc3981:41981-longCompactions-0 {}] regionserver.HStore(1526): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 2024-12-06T08:16:48,824 DEBUG [RS:0;b6b797fc3981:41981-longCompactions-0 {}] regionserver.HStore(1540): a8a53d5d20d3229597d6b3fca475c552/info is initiating minor compaction (all files) 2024-12-06T08:16:48,825 INFO [RS:0;b6b797fc3981:41981-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a8a53d5d20d3229597d6b3fca475c552/info in TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 2024-12-06T08:16:48,825 DEBUG [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2628): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 
2024-12-06T08:16:48,825 INFO [RS_OPEN_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(164): Opened TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 2024-12-06T08:16:48,825 INFO [RS:0;b6b797fc3981:41981-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac->hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f0786242974c4c07837517d883be292a-top, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/TestLogRolling-testLogRolling=68425bdac859c5f4533d3ba294e450ac-982a711d5eb74412919c31f72ef83c2f] into tmpdir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp, totalSize=103.7 K 2024-12-06T08:16:48,826 DEBUG [RS:0;b6b797fc3981:41981-longCompactions-0 {}] compactions.Compactor(224): Compacting f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1733472983786 2024-12-06T08:16:48,826 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=16 updating hbase:meta row=a8a53d5d20d3229597d6b3fca475c552, regionState=OPEN, openSeqNum=114, regionLocation=b6b797fc3981,41981,1733472972440 2024-12-06T08:16:48,826 DEBUG [RS:0;b6b797fc3981:41981-longCompactions-0 {}] compactions.Compactor(224): Compacting TestLogRolling-testLogRolling=68425bdac859c5f4533d3ba294e450ac-982a711d5eb74412919c31f72ef83c2f, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1733473008055 2024-12-06T08:16:48,829 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-06T08:16:48,829 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; OpenRegionProcedure a8a53d5d20d3229597d6b3fca475c552, server=b6b797fc3981,41981,1733472972440 in 212 msec 2024-12-06T08:16:48,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=12 2024-12-06T08:16:48,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a8a53d5d20d3229597d6b3fca475c552, ASSIGN in 369 msec 2024-12-06T08:16:48,833 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=68425bdac859c5f4533d3ba294e450ac, daughterA=6be54277077587710f4f7bf99a76fc14, daughterB=a8a53d5d20d3229597d6b3fca475c552 in 709 msec 2024-12-06T08:16:48,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741853_1029 (size=70862) 2024-12-06T08:16:48,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741853_1029 (size=70862) 2024-12-06T08:16:48,847 DEBUG 
[RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14/.tmp/info/7f937cd46ce54c01a9714e3e58f4935e as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14/info/7f937cd46ce54c01a9714e3e58f4935e 2024-12-06T08:16:48,850 INFO [RS:0;b6b797fc3981:41981-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a8a53d5d20d3229597d6b3fca475c552#info#compaction#49 average throughput is 28.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:16:48,851 DEBUG [RS:0;b6b797fc3981:41981-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/3e5da1a5efc0474c80026c76bc4ebab3 is 1080, key is row0062/info:/1733473006047/Put/seqid=0 2024-12-06T08:16:48,853 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-06T08:16:48,854 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 1 (all) file(s) in 6be54277077587710f4f7bf99a76fc14/info of 6be54277077587710f4f7bf99a76fc14 into 7f937cd46ce54c01a9714e3e58f4935e(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:16:48,854 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6be54277077587710f4f7bf99a76fc14: 2024-12-06T08:16:48,854 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14., storeName=6be54277077587710f4f7bf99a76fc14/info, priority=15, startTime=1733473008792; duration=0sec 2024-12-06T08:16:48,854 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:16:48,854 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6be54277077587710f4f7bf99a76fc14:info 2024-12-06T08:16:48,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741854_1030 (size=35344) 2024-12-06T08:16:48,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741854_1030 (size=35344) 2024-12-06T08:16:48,863 DEBUG [RS:0;b6b797fc3981:41981-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/3e5da1a5efc0474c80026c76bc4ebab3 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/3e5da1a5efc0474c80026c76bc4ebab3 2024-12-06T08:16:48,869 INFO [RS:0;b6b797fc3981:41981-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 2 (all) file(s) in a8a53d5d20d3229597d6b3fca475c552/info of a8a53d5d20d3229597d6b3fca475c552 into 3e5da1a5efc0474c80026c76bc4ebab3(size=34.5 K), total size for store is 34.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:16:48,869 DEBUG [RS:0;b6b797fc3981:41981-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:16:48,869 INFO [RS:0;b6b797fc3981:41981-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552., storeName=a8a53d5d20d3229597d6b3fca475c552/info, priority=14, startTime=1733473008823; duration=0sec 2024-12-06T08:16:48,869 DEBUG [RS:0;b6b797fc3981:41981-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:16:48,869 DEBUG [RS:0;b6b797fc3981:41981-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a8a53d5d20d3229597d6b3fca475c552:info 2024-12-06T08:16:48,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:49,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:50,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:51,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:52,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:53,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,359 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,682 INFO [master/b6b797fc3981:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-06T08:16:53,682 INFO [master/b6b797fc3981:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-06T08:16:53,867 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-06T08:16:53,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-06T08:16:53,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:54,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:55,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:56,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:57,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:16:58,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53700 deadline: 1733473028143, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733472973776.68425bdac859c5f4533d3ba294e450ac. is not online on b6b797fc3981,41981,1733472972440 2024-12-06T08:16:58,213 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 14835 2024-12-06T08:16:58,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:16:59,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:00,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:01,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:02,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:03,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:04,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:05,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:06,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:07,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:08,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:09,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:10,713 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1fe245b8d8375ab9b801a570a1b2b5e8, had cached 0 bytes from a total of 23930 2024-12-06T08:17:10,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:11,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:12,379 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:17:12,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:13,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:14,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:15,127 DEBUG [master/b6b797fc3981:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 14018a782b624965e782ac68a947637a changed from -1.0 to 0.0, refreshing cache 2024-12-06T08:17:15,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:16,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:17,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:18,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:18,177 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:17:18,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/2658aa6ddf214ff4b944d8969196b0f6 is 1080, key is row0090/info:/1733473038170/Put/seqid=0 2024-12-06T08:17:18,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741855_1031 (size=12509) 2024-12-06T08:17:18,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741855_1031 (size=12509) 2024-12-06T08:17:18,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/2658aa6ddf214ff4b944d8969196b0f6 2024-12-06T08:17:18,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/2658aa6ddf214ff4b944d8969196b0f6 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/2658aa6ddf214ff4b944d8969196b0f6 2024-12-06T08:17:18,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/2658aa6ddf214ff4b944d8969196b0f6, entries=7, sequenceid=124, filesize=12.2 K 2024-12-06T08:17:18,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a8a53d5d20d3229597d6b3fca475c552 in 42ms, sequenceid=124, compaction requested=false 2024-12-06T08:17:18,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:18,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:19,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:20,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:20,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:17:20,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/589be70033014c5fa61a29b587fe57ed is 1080, key is row0097/info:/1733473040178/Put/seqid=0 2024-12-06T08:17:20,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741856_1032 (size=12515) 2024-12-06T08:17:20,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741856_1032 (size=12515) 2024-12-06T08:17:20,197 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/589be70033014c5fa61a29b587fe57ed 2024-12-06T08:17:20,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/589be70033014c5fa61a29b587fe57ed as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/589be70033014c5fa61a29b587fe57ed 2024-12-06T08:17:20,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/589be70033014c5fa61a29b587fe57ed, entries=7, sequenceid=134, filesize=12.2 K 2024-12-06T08:17:20,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for a8a53d5d20d3229597d6b3fca475c552 in 25ms, sequenceid=134, compaction requested=true 2024-12-06T08:17:20,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:20,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:20,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a8a53d5d20d3229597d6b3fca475c552:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:17:20,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:17:20,210 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:17:20,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-12-06T08:17:20,211 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 60368 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:17:20,212 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1540): a8a53d5d20d3229597d6b3fca475c552/info is initiating minor compaction (all files) 2024-12-06T08:17:20,212 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a8a53d5d20d3229597d6b3fca475c552/info in TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 2024-12-06T08:17:20,212 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/3e5da1a5efc0474c80026c76bc4ebab3, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/2658aa6ddf214ff4b944d8969196b0f6, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/589be70033014c5fa61a29b587fe57ed] into tmpdir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp, totalSize=59.0 K 2024-12-06T08:17:20,212 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e5da1a5efc0474c80026c76bc4ebab3, keycount=28, bloomtype=ROW, size=34.5 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1733473006047 2024-12-06T08:17:20,213 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2658aa6ddf214ff4b944d8969196b0f6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733473038170 2024-12-06T08:17:20,213 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 589be70033014c5fa61a29b587fe57ed, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733473040178 2024-12-06T08:17:20,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/c7f9d78f612a4f688b06002af8849d3b is 1080, key is row0104/info:/1733473040186/Put/seqid=0 2024-12-06T08:17:20,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741857_1033 (size=28706) 2024-12-06T08:17:20,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741857_1033 (size=28706) 2024-12-06T08:17:20,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/c7f9d78f612a4f688b06002af8849d3b 2024-12-06T08:17:20,227 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a8a53d5d20d3229597d6b3fca475c552#info#compaction#53 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:17:20,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/c7f9d78f612a4f688b06002af8849d3b as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/c7f9d78f612a4f688b06002af8849d3b 2024-12-06T08:17:20,228 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/977befa68dba4b8f8d1acfc61f852bb5 is 1080, key is row0062/info:/1733473006047/Put/seqid=0 2024-12-06T08:17:20,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/c7f9d78f612a4f688b06002af8849d3b, entries=22, sequenceid=159, filesize=28.0 K 2024-12-06T08:17:20,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741858_1034 (size=50534) 2024-12-06T08:17:20,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741858_1034 (size=50534) 2024-12-06T08:17:20,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=3.15 KB/3228 for a8a53d5d20d3229597d6b3fca475c552 in 23ms, sequenceid=159, compaction requested=false 2024-12-06T08:17:20,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:20,238 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/977befa68dba4b8f8d1acfc61f852bb5 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/977befa68dba4b8f8d1acfc61f852bb5 2024-12-06T08:17:20,244 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a8a53d5d20d3229597d6b3fca475c552/info of a8a53d5d20d3229597d6b3fca475c552 into 977befa68dba4b8f8d1acfc61f852bb5(size=49.3 K), total size for store is 77.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:17:20,244 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:20,244 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552., storeName=a8a53d5d20d3229597d6b3fca475c552/info, priority=13, startTime=1733473040210; duration=0sec 2024-12-06T08:17:20,244 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:17:20,244 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a8a53d5d20d3229597d6b3fca475c552:info 2024-12-06T08:17:20,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:21,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:22,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:22,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:17:22,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/5b7063132eeb47d0b669b2e831b2174a is 1080, key is row0126/info:/1733473040211/Put/seqid=0 2024-12-06T08:17:22,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741859_1035 (size=12516) 2024-12-06T08:17:22,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741859_1035 (size=12516) 2024-12-06T08:17:22,230 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/5b7063132eeb47d0b669b2e831b2174a 2024-12-06T08:17:22,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/5b7063132eeb47d0b669b2e831b2174a as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5b7063132eeb47d0b669b2e831b2174a 2024-12-06T08:17:22,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5b7063132eeb47d0b669b2e831b2174a, entries=7, sequenceid=170, filesize=12.2 K 2024-12-06T08:17:22,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for a8a53d5d20d3229597d6b3fca475c552 in 25ms, sequenceid=170, compaction requested=true 2024-12-06T08:17:22,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:22,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a8a53d5d20d3229597d6b3fca475c552:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:17:22,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:17:22,245 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:17:22,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:22,245 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-06T08:17:22,246 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 91756 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:17:22,246 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1540): a8a53d5d20d3229597d6b3fca475c552/info is initiating minor compaction (all files) 2024-12-06T08:17:22,246 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a8a53d5d20d3229597d6b3fca475c552/info in TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 2024-12-06T08:17:22,246 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/977befa68dba4b8f8d1acfc61f852bb5, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/c7f9d78f612a4f688b06002af8849d3b, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5b7063132eeb47d0b669b2e831b2174a] into tmpdir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp, totalSize=89.6 K 2024-12-06T08:17:22,247 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 977befa68dba4b8f8d1acfc61f852bb5, keycount=42, bloomtype=ROW, size=49.3 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733473006047 2024-12-06T08:17:22,248 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7f9d78f612a4f688b06002af8849d3b, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1733473040186 2024-12-06T08:17:22,248 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b7063132eeb47d0b669b2e831b2174a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733473040211 2024-12-06T08:17:22,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/fa73ff03c30c4b2aa7f76a9b36ea1714 is 1080, key is row0133/info:/1733473042220/Put/seqid=0 2024-12-06T08:17:22,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741860_1036 (size=29784) 2024-12-06T08:17:22,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741860_1036 (size=29784) 2024-12-06T08:17:22,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=196 (bloomFilter=true), 
to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/fa73ff03c30c4b2aa7f76a9b36ea1714 2024-12-06T08:17:22,261 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a8a53d5d20d3229597d6b3fca475c552#info#compaction#56 average throughput is 36.43 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:17:22,261 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/f4357a2ef942470cbf2b3eec4b7c27ea is 1080, key is row0062/info:/1733473006047/Put/seqid=0 2024-12-06T08:17:22,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741861_1037 (size=82039) 2024-12-06T08:17:22,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741861_1037 (size=82039) 2024-12-06T08:17:22,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/fa73ff03c30c4b2aa7f76a9b36ea1714 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/fa73ff03c30c4b2aa7f76a9b36ea1714 2024-12-06T08:17:22,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/fa73ff03c30c4b2aa7f76a9b36ea1714, entries=23, sequenceid=196, filesize=29.1 K 2024-12-06T08:17:22,275 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/f4357a2ef942470cbf2b3eec4b7c27ea as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f4357a2ef942470cbf2b3eec4b7c27ea 2024-12-06T08:17:22,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=5.25 KB/5380 for a8a53d5d20d3229597d6b3fca475c552 in 30ms, sequenceid=196, compaction requested=false 2024-12-06T08:17:22,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:22,280 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a8a53d5d20d3229597d6b3fca475c552/info of a8a53d5d20d3229597d6b3fca475c552 into f4357a2ef942470cbf2b3eec4b7c27ea(size=80.1 K), total size for store is 109.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-06T08:17:22,280 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:22,280 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552., storeName=a8a53d5d20d3229597d6b3fca475c552/info, priority=13, startTime=1733473042245; duration=0sec 2024-12-06T08:17:22,280 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:17:22,281 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a8a53d5d20d3229597d6b3fca475c552:info 2024-12-06T08:17:22,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:23,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:24,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:24,255 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:17:24,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/29bda7277b544ee88b79e8e89f29c158 is 1080, key is row0156/info:/1733473042245/Put/seqid=0 2024-12-06T08:17:24,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741862_1038 (size=12516) 2024-12-06T08:17:24,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/29bda7277b544ee88b79e8e89f29c158 2024-12-06T08:17:24,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741862_1038 (size=12516) 2024-12-06T08:17:24,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/29bda7277b544ee88b79e8e89f29c158 as 
hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/29bda7277b544ee88b79e8e89f29c158 2024-12-06T08:17:24,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/29bda7277b544ee88b79e8e89f29c158, entries=7, sequenceid=207, filesize=12.2 K 2024-12-06T08:17:24,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for a8a53d5d20d3229597d6b3fca475c552 in 23ms, sequenceid=207, compaction requested=true 2024-12-06T08:17:24,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:24,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a8a53d5d20d3229597d6b3fca475c552:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:17:24,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:17:24,278 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:17:24,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:24,279 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-12-06T08:17:24,279 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 124339 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:17:24,279 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1540): a8a53d5d20d3229597d6b3fca475c552/info is initiating minor compaction (all files) 2024-12-06T08:17:24,279 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a8a53d5d20d3229597d6b3fca475c552/info in TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 
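The repeated "Failed invocation ... Filesystem closed" WARNs above come from the WAL close path retrying lease recovery against a DFSClient that has already been shut down. A minimal sketch of that recoverLease()/isFileClosed() polling pattern follows; it is not HBase's actual RecoverLeaseFSUtils, and the class name, 1-second interval, and timeout handling are illustrative assumptions:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Poll recoverLease()/isFileClosed() until the NameNode reports the file closed
      // or the deadline passes. "Filesystem closed" IOExceptions, as in the WARNs above,
      // mean the DFSClient behind `fs` was already shut down, so every attempt fails.
      public static boolean recoverWalLease(FileSystem fs, Path wal, long timeoutMs)
          throws InterruptedException {
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // non-HDFS filesystems have no lease to recover
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        long deadline = System.currentTimeMillis() + timeoutMs;
        for (int attempt = 0; System.currentTimeMillis() < deadline; attempt++) {
          try {
            if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
              return true; // lease released, file is closed
            }
          } catch (IOException e) {
            System.err.println("attempt=" + attempt + " failed for " + wal + ": " + e);
          }
          Thread.sleep(1000L); // hypothetical 1s retry interval
        }
        return false;
      }
    }
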
2024-12-06T08:17:24,280 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f4357a2ef942470cbf2b3eec4b7c27ea, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/fa73ff03c30c4b2aa7f76a9b36ea1714, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/29bda7277b544ee88b79e8e89f29c158] into tmpdir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp, totalSize=121.4 K 2024-12-06T08:17:24,280 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4357a2ef942470cbf2b3eec4b7c27ea, keycount=71, bloomtype=ROW, size=80.1 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733473006047 2024-12-06T08:17:24,281 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa73ff03c30c4b2aa7f76a9b36ea1714, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733473042220 2024-12-06T08:17:24,281 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29bda7277b544ee88b79e8e89f29c158, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733473042245 2024-12-06T08:17:24,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/646f79e3180b4a8795c3ef84ec16b847 is 1080, key is row0163/info:/1733473044256/Put/seqid=0 2024-12-06T08:17:24,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741863_1039 (size=27628) 2024-12-06T08:17:24,290 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/646f79e3180b4a8795c3ef84ec16b847 2024-12-06T08:17:24,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741863_1039 (size=27628) 2024-12-06T08:17:24,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a8a53d5d20d3229597d6b3fca475c552, server=b6b797fc3981,41981,1733472972440 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:17:24,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53700 deadline: 1733473054291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a8a53d5d20d3229597d6b3fca475c552, server=b6b797fc3981,41981,1733472972440 2024-12-06T08:17:24,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/646f79e3180b4a8795c3ef84ec16b847 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/646f79e3180b4a8795c3ef84ec16b847 2024-12-06T08:17:24,296 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a8a53d5d20d3229597d6b3fca475c552#info#compaction#59 average throughput is 25.91 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:17:24,296 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/17fc36aea10f4583bb6d5a0bf24d5997 is 1080, key is row0062/info:/1733473006047/Put/seqid=0 2024-12-06T08:17:24,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741864_1040 (size=114489) 2024-12-06T08:17:24,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/646f79e3180b4a8795c3ef84ec16b847, entries=21, sequenceid=231, filesize=27.0 K 2024-12-06T08:17:24,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741864_1040 (size=114489) 2024-12-06T08:17:24,301 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=8.41 KB/8608 for a8a53d5d20d3229597d6b3fca475c552 in 22ms, sequenceid=231, compaction requested=false 2024-12-06T08:17:24,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:24,305 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/17fc36aea10f4583bb6d5a0bf24d5997 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/17fc36aea10f4583bb6d5a0bf24d5997 2024-12-06T08:17:24,310 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a8a53d5d20d3229597d6b3fca475c552/info of a8a53d5d20d3229597d6b3fca475c552 into 17fc36aea10f4583bb6d5a0bf24d5997(size=111.8 K), total size for store is 138.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
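The RegionTooBusyException above ("Over memstore limit=32.0 K") is what the region server returns when a write arrives while the region's memstore is over its blocking limit; the HBase client normally absorbs it by retrying. The sketch below only illustrates that failure mode from a caller's point of view: the table name is taken from this test, but the row, qualifier, backoff values, and retry count are assumptions, and depending on client retry settings the exception may instead surface wrapped in a retries-exhausted exception:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
          Put put = new Put(Bytes.toBytes("row0210"))               // hypothetical row
              .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), // hypothetical qualifier
                  Bytes.toBytes("value"));
          long backoffMs = 100;                                     // assumed starting backoff
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              return;                                               // write accepted
            } catch (RegionTooBusyException busy) {
              // memstore above its blocking limit; give MemStoreFlusher time to drain
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
          throw new IOException("region stayed too busy after retries");
        }
      }
    }
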
2024-12-06T08:17:24,310 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:24,310 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552., storeName=a8a53d5d20d3229597d6b3fca475c552/info, priority=13, startTime=1733473044278; duration=0sec 2024-12-06T08:17:24,310 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:17:24,310 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a8a53d5d20d3229597d6b3fca475c552:info 2024-12-06T08:17:24,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:25,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:26,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:27,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:27,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta after 196119ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor238.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-06T08:17:28,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:29,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:30,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:31,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:32,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:33,772 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 6be54277077587710f4f7bf99a76fc14, had cached 0 bytes from a total of 70862 2024-12-06T08:17:33,795 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region a8a53d5d20d3229597d6b3fca475c552, had cached 0 bytes from a total of 142117 2024-12-06T08:17:33,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:34,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:34,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-06T08:17:34,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/902fb2828e124f0db2c02208eefaf974 is 1080, key is row0184/info:/1733473044279/Put/seqid=0 2024-12-06T08:17:34,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741865_1041 (size=14672) 2024-12-06T08:17:34,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741865_1041 (size=14672) 2024-12-06T08:17:34,346 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/902fb2828e124f0db2c02208eefaf974 2024-12-06T08:17:34,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/902fb2828e124f0db2c02208eefaf974 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/902fb2828e124f0db2c02208eefaf974 2024-12-06T08:17:34,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/902fb2828e124f0db2c02208eefaf974, entries=9, sequenceid=244, filesize=14.3 K 2024-12-06T08:17:34,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for a8a53d5d20d3229597d6b3fca475c552 in 25ms, sequenceid=244, compaction requested=true 2024-12-06T08:17:34,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:34,358 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store a8a53d5d20d3229597d6b3fca475c552:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:17:34,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:17:34,358 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:17:34,359 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 156789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:17:34,359 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1540): a8a53d5d20d3229597d6b3fca475c552/info is initiating minor compaction (all files) 2024-12-06T08:17:34,359 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a8a53d5d20d3229597d6b3fca475c552/info in TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 2024-12-06T08:17:34,359 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/17fc36aea10f4583bb6d5a0bf24d5997, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/646f79e3180b4a8795c3ef84ec16b847, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/902fb2828e124f0db2c02208eefaf974] into tmpdir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp, totalSize=153.1 K 2024-12-06T08:17:34,360 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17fc36aea10f4583bb6d5a0bf24d5997, keycount=101, bloomtype=ROW, size=111.8 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733473006047 2024-12-06T08:17:34,360 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 646f79e3180b4a8795c3ef84ec16b847, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733473044256 2024-12-06T08:17:34,361 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 902fb2828e124f0db2c02208eefaf974, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733473044279 2024-12-06T08:17:34,375 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a8a53d5d20d3229597d6b3fca475c552#info#compaction#61 average throughput is 33.61 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:17:34,375 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/180bbb4f08b24bdabcba91095b3f8864 is 1080, key is row0062/info:/1733473006047/Put/seqid=0 2024-12-06T08:17:34,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741866_1042 (size=147136) 2024-12-06T08:17:34,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741866_1042 (size=147136) 2024-12-06T08:17:34,399 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/180bbb4f08b24bdabcba91095b3f8864 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/180bbb4f08b24bdabcba91095b3f8864 2024-12-06T08:17:34,405 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a8a53d5d20d3229597d6b3fca475c552/info of a8a53d5d20d3229597d6b3fca475c552 into 180bbb4f08b24bdabcba91095b3f8864(size=143.7 K), total size for store is 143.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:17:34,405 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:34,405 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552., storeName=a8a53d5d20d3229597d6b3fca475c552/info, priority=13, startTime=1733473054358; duration=0sec 2024-12-06T08:17:34,405 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:17:34,405 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a8a53d5d20d3229597d6b3fca475c552:info 2024-12-06T08:17:34,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:35,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:36,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:36,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:17:36,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/1bbd12d894d94643a75ad557c2873c21 is 1080, key is row0193/info:/1733473056334/Put/seqid=0 2024-12-06T08:17:36,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741867_1043 (size=12516) 2024-12-06T08:17:36,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741867_1043 (size=12516) 2024-12-06T08:17:36,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/1bbd12d894d94643a75ad557c2873c21 2024-12-06T08:17:36,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/1bbd12d894d94643a75ad557c2873c21 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/1bbd12d894d94643a75ad557c2873c21 2024-12-06T08:17:36,363 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/1bbd12d894d94643a75ad557c2873c21, entries=7, sequenceid=255, filesize=12.2 K 2024-12-06T08:17:36,364 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for a8a53d5d20d3229597d6b3fca475c552 in 23ms, sequenceid=255, compaction requested=false 2024-12-06T08:17:36,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:36,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:36,364 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-12-06T08:17:36,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/5455d10fa9f9474ca5eacef4cd5da265 is 1080, key is row0200/info:/1733473056342/Put/seqid=0 2024-12-06T08:17:36,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to 
blk_1073741868_1044 (size=28728) 2024-12-06T08:17:36,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741868_1044 (size=28728) 2024-12-06T08:17:36,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/5455d10fa9f9474ca5eacef4cd5da265 2024-12-06T08:17:36,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/5455d10fa9f9474ca5eacef4cd5da265 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5455d10fa9f9474ca5eacef4cd5da265 2024-12-06T08:17:36,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5455d10fa9f9474ca5eacef4cd5da265, entries=22, sequenceid=280, filesize=28.1 K 2024-12-06T08:17:36,386 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=3.15 KB/3228 for a8a53d5d20d3229597d6b3fca475c552 in 22ms, sequenceid=280, compaction requested=true 2024-12-06T08:17:36,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:36,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a8a53d5d20d3229597d6b3fca475c552:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:17:36,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:17:36,386 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:17:36,387 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 188380 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-06T08:17:36,387 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1540): a8a53d5d20d3229597d6b3fca475c552/info is initiating minor compaction (all files) 2024-12-06T08:17:36,387 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a8a53d5d20d3229597d6b3fca475c552/info in TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 
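The "Exploring compaction algorithm has selected 3 files" lines above rest on a size-ratio heuristic: a candidate set is "in ratio" only when no single file exceeds hbase.hstore.compaction.ratio times the combined size of the others. The sketch below shows just that one test, with made-up sizes and the assumed default 1.2 ratio; the real ExploringCompactionPolicy also enumerates permutations, enforces min/max file counts and off-peak ratios, and can still end up running an all-file minor compaction, as this log does, under other conditions:

    import java.util.List;

    public class CompactionRatioSketch {
      // "In ratio": no file may exceed ratio * (sum of the other files' sizes).
      static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        double ratio = 1.2; // assumed default hbase.hstore.compaction.ratio
        // illustrative sizes only, not taken from this log
        System.out.println(withinRatio(List.of(10_000_000L, 9_000_000L, 8_000_000L), ratio));  // true
        System.out.println(withinRatio(List.of(100_000_000L, 5_000_000L, 5_000_000L), ratio)); // false
      }
    }
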
2024-12-06T08:17:36,387 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/180bbb4f08b24bdabcba91095b3f8864, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/1bbd12d894d94643a75ad557c2873c21, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5455d10fa9f9474ca5eacef4cd5da265] into tmpdir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp, totalSize=184.0 K 2024-12-06T08:17:36,388 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 180bbb4f08b24bdabcba91095b3f8864, keycount=131, bloomtype=ROW, size=143.7 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733473006047 2024-12-06T08:17:36,388 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bbd12d894d94643a75ad557c2873c21, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733473056334 2024-12-06T08:17:36,388 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5455d10fa9f9474ca5eacef4cd5da265, keycount=22, bloomtype=ROW, size=28.1 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733473056342 2024-12-06T08:17:36,399 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a8a53d5d20d3229597d6b3fca475c552#info#compaction#64 average throughput is 54.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:17:36,400 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/f6ab865084184833a17b0b06234c8d72 is 1080, key is row0062/info:/1733473006047/Put/seqid=0 2024-12-06T08:17:36,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741869_1045 (size=178530) 2024-12-06T08:17:36,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741869_1045 (size=178530) 2024-12-06T08:17:36,411 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/f6ab865084184833a17b0b06234c8d72 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f6ab865084184833a17b0b06234c8d72 2024-12-06T08:17:36,416 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a8a53d5d20d3229597d6b3fca475c552/info of a8a53d5d20d3229597d6b3fca475c552 into f6ab865084184833a17b0b06234c8d72(size=174.3 K), total size for store is 174.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:17:36,416 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:36,416 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552., storeName=a8a53d5d20d3229597d6b3fca475c552/info, priority=13, startTime=1733473056386; duration=0sec 2024-12-06T08:17:36,416 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:17:36,416 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a8a53d5d20d3229597d6b3fca475c552:info 2024-12-06T08:17:36,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:37,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:38,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:38,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-06T08:17:38,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/29233129e7374c6ca3c950cf9142f904 is 1080, key is row0222/info:/1733473056365/Put/seqid=0 2024-12-06T08:17:38,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741870_1046 (size=12523) 2024-12-06T08:17:38,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741870_1046 (size=12523) 2024-12-06T08:17:38,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/29233129e7374c6ca3c950cf9142f904 2024-12-06T08:17:38,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/29233129e7374c6ca3c950cf9142f904 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/29233129e7374c6ca3c950cf9142f904 2024-12-06T08:17:38,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a8a53d5d20d3229597d6b3fca475c552, server=b6b797fc3981,41981,1733472972440 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-06T08:17:38,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53700 deadline: 1733473068395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a8a53d5d20d3229597d6b3fca475c552, server=b6b797fc3981,41981,1733472972440 2024-12-06T08:17:38,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/29233129e7374c6ca3c950cf9142f904, entries=7, sequenceid=291, filesize=12.2 K 2024-12-06T08:17:38,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for a8a53d5d20d3229597d6b3fca475c552 in 24ms, sequenceid=291, compaction requested=false 2024-12-06T08:17:38,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:38,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:39,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:40,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:41,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:42,379 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-06T08:17:42,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:43,214 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 1588230740, had cached 0 bytes from a total of 14835 2024-12-06T08:17:43,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:44,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:45,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:46,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:47,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:48,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41981 {}] regionserver.HRegion(8581): Flush requested on a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:48,478 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-06T08:17:48,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/1dd14cca802c421b90d9dbbe0d20348f is 1080, key is row0229/info:/1733473058373/Put/seqid=0 2024-12-06T08:17:48,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741871_1047 (size=29807) 2024-12-06T08:17:48,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741871_1047 (size=29807) 2024-12-06T08:17:48,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/1dd14cca802c421b90d9dbbe0d20348f 2024-12-06T08:17:48,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/1dd14cca802c421b90d9dbbe0d20348f as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/1dd14cca802c421b90d9dbbe0d20348f 2024-12-06T08:17:48,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/1dd14cca802c421b90d9dbbe0d20348f, entries=23, sequenceid=317, filesize=29.1 K 2024-12-06T08:17:48,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=5.25 KB/5380 for a8a53d5d20d3229597d6b3fca475c552 in 21ms, sequenceid=317, compaction requested=true 2024-12-06T08:17:48,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:48,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a8a53d5d20d3229597d6b3fca475c552:info, priority=-2147483648, current under compaction store size is 1 2024-12-06T08:17:48,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:17:48,500 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-06T08:17:48,501 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 220860 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-12-06T08:17:48,501 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1540): a8a53d5d20d3229597d6b3fca475c552/info is initiating minor compaction (all files) 2024-12-06T08:17:48,501 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a8a53d5d20d3229597d6b3fca475c552/info in TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 2024-12-06T08:17:48,501 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f6ab865084184833a17b0b06234c8d72, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/29233129e7374c6ca3c950cf9142f904, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/1dd14cca802c421b90d9dbbe0d20348f] into tmpdir=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp, totalSize=215.7 K 2024-12-06T08:17:48,501 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6ab865084184833a17b0b06234c8d72, keycount=160, bloomtype=ROW, size=174.3 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733473006047 2024-12-06T08:17:48,502 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29233129e7374c6ca3c950cf9142f904, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733473056365 2024-12-06T08:17:48,502 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1dd14cca802c421b90d9dbbe0d20348f, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1733473058373 2024-12-06T08:17:48,515 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a8a53d5d20d3229597d6b3fca475c552#info#compaction#67 average throughput is 48.74 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-06T08:17:48,515 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/9c4606f0f33a4a69a599813933dfeb73 is 1080, key is row0062/info:/1733473006047/Put/seqid=0 2024-12-06T08:17:48,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741872_1048 (size=211079) 2024-12-06T08:17:48,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741872_1048 (size=211079) 2024-12-06T08:17:48,525 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/9c4606f0f33a4a69a599813933dfeb73 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/9c4606f0f33a4a69a599813933dfeb73 2024-12-06T08:17:48,531 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a8a53d5d20d3229597d6b3fca475c552/info of a8a53d5d20d3229597d6b3fca475c552 into 9c4606f0f33a4a69a599813933dfeb73(size=206.1 K), total size for store is 206.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-06T08:17:48,531 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:48,531 INFO [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552., storeName=a8a53d5d20d3229597d6b3fca475c552/info, priority=13, startTime=1733473068500; duration=0sec 2024-12-06T08:17:48,531 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-06T08:17:48,531 DEBUG [RS:0;b6b797fc3981:41981-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a8a53d5d20d3229597d6b3fca475c552:info 2024-12-06T08:17:48,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:49,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:50,483 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-06T08:17:50,484 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C41981%2C1733472972440.1733473070483 2024-12-06T08:17:50,490 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440/b6b797fc3981%2C41981%2C1733472972440.1733472972821 with entries=309, filesize=306.80 KB; new WAL /user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440/b6b797fc3981%2C41981%2C1733472972440.1733473070483 2024-12-06T08:17:50,494 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41541:41541),(127.0.0.1/127.0.0.1:35783:35783)] 2024-12-06T08:17:50,494 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440/b6b797fc3981%2C41981%2C1733472972440.1733472972821 is not closed yet, will try archiving it next time 2024-12-06T08:17:50,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741833_1009 (size=314174) 2024-12-06T08:17:50,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741833_1009 (size=314174) 2024-12-06T08:17:50,497 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 6be54277077587710f4f7bf99a76fc14: 2024-12-06T08:17:50,497 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 14018a782b624965e782ac68a947637a 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T08:17:50,513 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/namespace/14018a782b624965e782ac68a947637a/.tmp/info/de1d9d70baf544ecb1c556efc26937d6 is 45, key is default/info:d/1733472973656/Put/seqid=0 2024-12-06T08:17:50,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741874_1050 (size=5037) 2024-12-06T08:17:50,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741874_1050 (size=5037) 2024-12-06T08:17:50,919 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/namespace/14018a782b624965e782ac68a947637a/.tmp/info/de1d9d70baf544ecb1c556efc26937d6 2024-12-06T08:17:50,924 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/namespace/14018a782b624965e782ac68a947637a/.tmp/info/de1d9d70baf544ecb1c556efc26937d6 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/namespace/14018a782b624965e782ac68a947637a/info/de1d9d70baf544ecb1c556efc26937d6 2024-12-06T08:17:50,929 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/namespace/14018a782b624965e782ac68a947637a/info/de1d9d70baf544ecb1c556efc26937d6, entries=2, sequenceid=6, filesize=4.9 K 2024-12-06T08:17:50,930 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 14018a782b624965e782ac68a947637a in 432ms, sequenceid=6, compaction requested=false 2024-12-06T08:17:50,930 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 14018a782b624965e782ac68a947637a: 2024-12-06T08:17:50,930 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.21 KB heapSize=4.13 KB 2024-12-06T08:17:50,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-06T08:17:50,934 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/.tmp/info/d4803652b1e94a7ba4ad80c083c3b6d0 is 193, key is TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552./info:regioninfo/1733473008826/Put/seqid=0 2024-12-06T08:17:50,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741875_1051 (size=7803) 2024-12-06T08:17:50,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741875_1051 (size=7803) 2024-12-06T08:17:50,939 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.21 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/.tmp/info/d4803652b1e94a7ba4ad80c083c3b6d0 2024-12-06T08:17:50,943 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/.tmp/info/d4803652b1e94a7ba4ad80c083c3b6d0 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/info/d4803652b1e94a7ba4ad80c083c3b6d0 2024-12-06T08:17:50,947 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/info/d4803652b1e94a7ba4ad80c083c3b6d0, entries=16, sequenceid=24, filesize=7.6 K 2024-12-06T08:17:50,948 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~2.21 KB/2260, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 18ms, sequenceid=24, compaction requested=false 2024-12-06T08:17:50,949 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for 1588230740: 2024-12-06T08:17:50,949 INFO [Time-limited test {}] regionserver.HRegion(2837): Flushing a8a53d5d20d3229597d6b3fca475c552 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-12-06T08:17:50,952 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/5bfcb4a9cb1b44169d0001184b7feb0e is 1080, key is row0252/info:/1733473068478/Put/seqid=0 2024-12-06T08:17:50,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741876_1052 (size=10357) 2024-12-06T08:17:50,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741876_1052 (size=10357) 2024-12-06T08:17:50,957 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/5bfcb4a9cb1b44169d0001184b7feb0e 2024-12-06T08:17:50,962 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/.tmp/info/5bfcb4a9cb1b44169d0001184b7feb0e as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5bfcb4a9cb1b44169d0001184b7feb0e 2024-12-06T08:17:50,967 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5bfcb4a9cb1b44169d0001184b7feb0e, entries=5, sequenceid=326, filesize=10.1 K 2024-12-06T08:17:50,967 INFO [Time-limited test {}] regionserver.HRegion(3040): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for a8a53d5d20d3229597d6b3fca475c552 in 18ms, sequenceid=326, compaction requested=false 2024-12-06T08:17:50,968 DEBUG [Time-limited test {}] regionserver.HRegion(2538): Flush status journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:50,968 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C41981%2C1733472972440.1733473070968 2024-12-06T08:17:50,974 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440/b6b797fc3981%2C41981%2C1733472972440.1733473070483 with entries=4, filesize=1.22 KB; new WAL /user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440/b6b797fc3981%2C41981%2C1733472972440.1733473070968 2024-12-06T08:17:50,974 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35783:35783),(127.0.0.1/127.0.0.1:41541:41541)] 2024-12-06T08:17:50,974 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440/b6b797fc3981%2C41981%2C1733472972440.1733473070483 is not closed yet, will try archiving it next time 2024-12-06T08:17:50,974 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440/b6b797fc3981%2C41981%2C1733472972440.1733472972821 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/oldWALs/b6b797fc3981%2C41981%2C1733472972440.1733472972821 2024-12-06T08:17:50,975 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-06T08:17:50,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741873_1049 (size=1255) 2024-12-06T08:17:50,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741873_1049 (size=1255) 2024-12-06T08:17:50,976 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440/b6b797fc3981%2C41981%2C1733472972440.1733473070483 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/oldWALs/b6b797fc3981%2C41981%2C1733472972440.1733473070483 2024-12-06T08:17:51,075 INFO [Time-limited test {}] 
hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T08:17:51,075 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-06T08:17:51,075 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x314c3bcb to 127.0.0.1:62431 2024-12-06T08:17:51,075 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:17:51,075 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T08:17:51,076 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=544265910, stopped=false 2024-12-06T08:17:51,076 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b6b797fc3981,42321,1733472972396 2024-12-06T08:17:51,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:17:51,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:17:51,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:51,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:51,078 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T08:17:51,078 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:17:51,078 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,41981,1733472972440' ***** 2024-12-06T08:17:51,078 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T08:17:51,079 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:17:51,079 INFO [RS:0;b6b797fc3981:41981 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:17:51,079 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T08:17:51,079 INFO [RS:0;b6b797fc3981:41981 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
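[Editor's note] The memstore flushes and the WAL roll recorded just above (the DefaultStoreFlusher "Flushed memstore data size=..." entries, the HStore "Added ..." commits, and the AbstractFSWAL "Rolled WAL ..." entry) are driven from the test client rather than occurring spontaneously. A minimal sketch of issuing the same operations through the public Admin API follows; it is an illustration only, not the actual TestLogRolling source. The table name is taken from the log; everything else (configuration, choice of region server) is a placeholder assumption.

    // Sketch only: assumes a reachable cluster via the default HBaseConfiguration.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndRollSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
          // Force memstores to disk, producing the DefaultStoreFlusher / HStore "Added ..." entries seen above.
          admin.flush(table);
          // Ask a region server to roll its WAL, producing the AbstractFSWAL "Rolled WAL ..." entry;
          // picking the first live server is an assumption for this sketch (the test run above has only one).
          ServerName rs = admin.getClusterMetrics().getLiveServerMetrics().keySet().iterator().next();
          admin.rollWALWriter(rs);
        }
      }
    }

[Editor's note] After the roll, the old WAL file is no longer needed once its edits are flushed, which is why the log then shows WAL-Archive-0 moving the previous files into oldWALs.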
2024-12-06T08:17:51,079 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(3579): Received CLOSE for 6be54277077587710f4f7bf99a76fc14 2024-12-06T08:17:51,079 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:17:51,079 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(3579): Received CLOSE for 14018a782b624965e782ac68a947637a 2024-12-06T08:17:51,079 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(3579): Received CLOSE for a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:51,079 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,41981,1733472972440 2024-12-06T08:17:51,079 DEBUG [RS:0;b6b797fc3981:41981 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:17:51,079 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:17:51,079 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:17:51,079 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T08:17:51,079 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T08:17:51,080 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:17:51,080 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 6be54277077587710f4f7bf99a76fc14, disabling compactions & flushes 2024-12-06T08:17:51,080 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14. 2024-12-06T08:17:51,080 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14. 2024-12-06T08:17:51,080 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14. after waiting 0 ms 2024-12-06T08:17:51,080 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14. 
2024-12-06T08:17:51,081 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(1599): Waiting on 4 regions to close 2024-12-06T08:17:51,081 DEBUG [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(1603): Online Regions={6be54277077587710f4f7bf99a76fc14=TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14., 14018a782b624965e782ac68a947637a=hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a., 1588230740=hbase:meta,,1.1588230740, a8a53d5d20d3229597d6b3fca475c552=TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.} 2024-12-06T08:17:51,081 DEBUG [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(1629): Waiting on 14018a782b624965e782ac68a947637a, 1588230740, 6be54277077587710f4f7bf99a76fc14, a8a53d5d20d3229597d6b3fca475c552 2024-12-06T08:17:51,081 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:17:51,081 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:17:51,081 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:17:51,081 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:17:51,081 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:17:51,081 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14/info/f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac->hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f0786242974c4c07837517d883be292a-bottom] to archive 2024-12-06T08:17:51,082 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-06T08:17:51,084 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14/info/f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14/info/f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:17:51,086 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/meta/1588230740/recovered.edits/27.seqid, newMaxSeqId=27, maxSeqId=1 2024-12-06T08:17:51,087 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T08:17:51,087 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T08:17:51,087 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:17:51,087 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T08:17:51,089 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/6be54277077587710f4f7bf99a76fc14/recovered.edits/118.seqid, newMaxSeqId=118, maxSeqId=113 2024-12-06T08:17:51,090 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14. 2024-12-06T08:17:51,090 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 6be54277077587710f4f7bf99a76fc14: 2024-12-06T08:17:51,090 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733473008123.6be54277077587710f4f7bf99a76fc14. 2024-12-06T08:17:51,090 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 14018a782b624965e782ac68a947637a, disabling compactions & flushes 2024-12-06T08:17:51,090 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. 2024-12-06T08:17:51,090 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. 
2024-12-06T08:17:51,090 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. after waiting 0 ms 2024-12-06T08:17:51,090 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. 2024-12-06T08:17:51,094 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/hbase/namespace/14018a782b624965e782ac68a947637a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T08:17:51,094 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. 2024-12-06T08:17:51,095 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 14018a782b624965e782ac68a947637a: 2024-12-06T08:17:51,095 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733472973244.14018a782b624965e782ac68a947637a. 2024-12-06T08:17:51,095 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing a8a53d5d20d3229597d6b3fca475c552, disabling compactions & flushes 2024-12-06T08:17:51,095 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 2024-12-06T08:17:51,095 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 2024-12-06T08:17:51,095 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. after waiting 0 ms 2024-12-06T08:17:51,095 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 
2024-12-06T08:17:51,095 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac->hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/68425bdac859c5f4533d3ba294e450ac/info/f0786242974c4c07837517d883be292a-top, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/3e5da1a5efc0474c80026c76bc4ebab3, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/TestLogRolling-testLogRolling=68425bdac859c5f4533d3ba294e450ac-982a711d5eb74412919c31f72ef83c2f, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/2658aa6ddf214ff4b944d8969196b0f6, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/977befa68dba4b8f8d1acfc61f852bb5, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/589be70033014c5fa61a29b587fe57ed, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/c7f9d78f612a4f688b06002af8849d3b, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f4357a2ef942470cbf2b3eec4b7c27ea, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5b7063132eeb47d0b669b2e831b2174a, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/fa73ff03c30c4b2aa7f76a9b36ea1714, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/17fc36aea10f4583bb6d5a0bf24d5997, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/29bda7277b544ee88b79e8e89f29c158, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/646f79e3180b4a8795c3ef84ec16b847, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/180bbb4f08b24bdabcba91095b3f8864, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/902fb2828e124f0db2c02208eefaf974, 
hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/1bbd12d894d94643a75ad557c2873c21, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f6ab865084184833a17b0b06234c8d72, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5455d10fa9f9474ca5eacef4cd5da265, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/29233129e7374c6ca3c950cf9142f904, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/1dd14cca802c421b90d9dbbe0d20348f] to archive 2024-12-06T08:17:51,097 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-06T08:17:51,099 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f0786242974c4c07837517d883be292a.68425bdac859c5f4533d3ba294e450ac 2024-12-06T08:17:51,100 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/3e5da1a5efc0474c80026c76bc4ebab3 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/3e5da1a5efc0474c80026c76bc4ebab3 2024-12-06T08:17:51,102 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/TestLogRolling-testLogRolling=68425bdac859c5f4533d3ba294e450ac-982a711d5eb74412919c31f72ef83c2f to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/TestLogRolling-testLogRolling=68425bdac859c5f4533d3ba294e450ac-982a711d5eb74412919c31f72ef83c2f 2024-12-06T08:17:51,103 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/2658aa6ddf214ff4b944d8969196b0f6 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/2658aa6ddf214ff4b944d8969196b0f6 2024-12-06T08:17:51,104 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/977befa68dba4b8f8d1acfc61f852bb5 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/977befa68dba4b8f8d1acfc61f852bb5 2024-12-06T08:17:51,105 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/589be70033014c5fa61a29b587fe57ed to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/589be70033014c5fa61a29b587fe57ed 2024-12-06T08:17:51,107 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/c7f9d78f612a4f688b06002af8849d3b to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/c7f9d78f612a4f688b06002af8849d3b 2024-12-06T08:17:51,108 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f4357a2ef942470cbf2b3eec4b7c27ea to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f4357a2ef942470cbf2b3eec4b7c27ea 2024-12-06T08:17:51,109 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5b7063132eeb47d0b669b2e831b2174a to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5b7063132eeb47d0b669b2e831b2174a 2024-12-06T08:17:51,111 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/fa73ff03c30c4b2aa7f76a9b36ea1714 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/fa73ff03c30c4b2aa7f76a9b36ea1714 2024-12-06T08:17:51,113 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/17fc36aea10f4583bb6d5a0bf24d5997 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/17fc36aea10f4583bb6d5a0bf24d5997 2024-12-06T08:17:51,114 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/29bda7277b544ee88b79e8e89f29c158 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/29bda7277b544ee88b79e8e89f29c158 2024-12-06T08:17:51,115 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/646f79e3180b4a8795c3ef84ec16b847 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/646f79e3180b4a8795c3ef84ec16b847 2024-12-06T08:17:51,116 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/180bbb4f08b24bdabcba91095b3f8864 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/180bbb4f08b24bdabcba91095b3f8864 2024-12-06T08:17:51,118 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/902fb2828e124f0db2c02208eefaf974 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/902fb2828e124f0db2c02208eefaf974 2024-12-06T08:17:51,119 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/1bbd12d894d94643a75ad557c2873c21 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/1bbd12d894d94643a75ad557c2873c21 2024-12-06T08:17:51,120 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f6ab865084184833a17b0b06234c8d72 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/f6ab865084184833a17b0b06234c8d72 2024-12-06T08:17:51,121 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5455d10fa9f9474ca5eacef4cd5da265 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/5455d10fa9f9474ca5eacef4cd5da265 2024-12-06T08:17:51,122 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/29233129e7374c6ca3c950cf9142f904 to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/29233129e7374c6ca3c950cf9142f904 2024-12-06T08:17:51,124 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/1dd14cca802c421b90d9dbbe0d20348f to hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/archive/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/info/1dd14cca802c421b90d9dbbe0d20348f 2024-12-06T08:17:51,128 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/data/default/TestLogRolling-testLogRolling/a8a53d5d20d3229597d6b3fca475c552/recovered.edits/329.seqid, newMaxSeqId=329, maxSeqId=113 2024-12-06T08:17:51,129 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed 
TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 2024-12-06T08:17:51,129 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for a8a53d5d20d3229597d6b3fca475c552: 2024-12-06T08:17:51,129 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733473008123.a8a53d5d20d3229597d6b3fca475c552. 2024-12-06T08:17:51,274 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-06T08:17:51,275 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-06T08:17:51,281 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,41981,1733472972440; all regions closed. 2024-12-06T08:17:51,281 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440 2024-12-06T08:17:51,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741834_1010 (size=9351) 2024-12-06T08:17:51,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741834_1010 (size=9351) 2024-12-06T08:17:51,286 DEBUG [RS:0;b6b797fc3981:41981 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/oldWALs 2024-12-06T08:17:51,286 INFO [RS:0;b6b797fc3981:41981 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b6b797fc3981%2C41981%2C1733472972440.meta:.meta(num 1733472973205) 2024-12-06T08:17:51,286 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/WALs/b6b797fc3981,41981,1733472972440 2024-12-06T08:17:51,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741877_1053 (size=1071) 2024-12-06T08:17:51,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741877_1053 (size=1071) 2024-12-06T08:17:51,290 DEBUG [RS:0;b6b797fc3981:41981 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/oldWALs 2024-12-06T08:17:51,290 INFO [RS:0;b6b797fc3981:41981 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b6b797fc3981%2C41981%2C1733472972440:(num 1733473070968) 2024-12-06T08:17:51,290 DEBUG [RS:0;b6b797fc3981:41981 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:17:51,290 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:17:51,290 INFO [RS:0;b6b797fc3981:41981 {}] hbase.ChoreService(370): Chore service for: regionserver/b6b797fc3981:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-06T08:17:51,290 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T08:17:51,291 INFO [RS:0;b6b797fc3981:41981 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41981 2024-12-06T08:17:51,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:17:51,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b6b797fc3981,41981,1733472972440 2024-12-06T08:17:51,294 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b6b797fc3981,41981,1733472972440] 2024-12-06T08:17:51,294 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b6b797fc3981,41981,1733472972440; numProcessing=1 2024-12-06T08:17:51,295 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b6b797fc3981,41981,1733472972440 already deleted, retry=false 2024-12-06T08:17:51,295 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b6b797fc3981,41981,1733472972440 expired; onlineServers=0 2024-12-06T08:17:51,295 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,42321,1733472972396' ***** 2024-12-06T08:17:51,295 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T08:17:51,295 DEBUG [M:0;b6b797fc3981:42321 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@121a2443, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:17:51,295 INFO [M:0;b6b797fc3981:42321 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,42321,1733472972396 2024-12-06T08:17:51,295 INFO [M:0;b6b797fc3981:42321 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,42321,1733472972396; all regions closed. 2024-12-06T08:17:51,295 DEBUG [M:0;b6b797fc3981:42321 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:17:51,295 DEBUG [M:0;b6b797fc3981:42321 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T08:17:51,295 DEBUG [M:0;b6b797fc3981:42321 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T08:17:51,295 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T08:17:51,295 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472972609 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733472972609,5,FailOnTimeoutGroup] 2024-12-06T08:17:51,295 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472972608 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733472972608,5,FailOnTimeoutGroup] 2024-12-06T08:17:51,295 INFO [M:0;b6b797fc3981:42321 {}] hbase.ChoreService(370): Chore service for: master/b6b797fc3981:0 had [] on shutdown 2024-12-06T08:17:51,295 DEBUG [M:0;b6b797fc3981:42321 {}] master.HMaster(1733): Stopping service threads 2024-12-06T08:17:51,295 INFO [M:0;b6b797fc3981:42321 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T08:17:51,296 INFO [M:0;b6b797fc3981:42321 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T08:17:51,296 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T08:17:51,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T08:17:51,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:51,296 DEBUG [M:0;b6b797fc3981:42321 {}] zookeeper.ZKUtil(347): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T08:17:51,296 WARN [M:0;b6b797fc3981:42321 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T08:17:51,296 INFO [M:0;b6b797fc3981:42321 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T08:17:51,296 INFO [M:0;b6b797fc3981:42321 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T08:17:51,296 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:17:51,296 DEBUG [M:0;b6b797fc3981:42321 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:17:51,296 INFO [M:0;b6b797fc3981:42321 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:17:51,297 DEBUG [M:0;b6b797fc3981:42321 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:17:51,297 DEBUG [M:0;b6b797fc3981:42321 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:17:51,297 DEBUG [M:0;b6b797fc3981:42321 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T08:17:51,297 INFO [M:0;b6b797fc3981:42321 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=66.44 KB heapSize=81.69 KB 2024-12-06T08:17:51,312 DEBUG [M:0;b6b797fc3981:42321 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0418497bce2a4479b6306758e43904db is 82, key is hbase:meta,,1/info:regioninfo/1733472973226/Put/seqid=0 2024-12-06T08:17:51,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741878_1054 (size=5672) 2024-12-06T08:17:51,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741878_1054 (size=5672) 2024-12-06T08:17:51,317 INFO [M:0;b6b797fc3981:42321 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0418497bce2a4479b6306758e43904db 2024-12-06T08:17:51,336 DEBUG [M:0;b6b797fc3981:42321 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ca8003516a2546e18e7a7c6db490d8a0 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1733472974136/Put/seqid=0 2024-12-06T08:17:51,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741879_1055 (size=7277) 2024-12-06T08:17:51,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741879_1055 (size=7277) 2024-12-06T08:17:51,342 INFO [M:0;b6b797fc3981:42321 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.83 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ca8003516a2546e18e7a7c6db490d8a0 2024-12-06T08:17:51,347 INFO [M:0;b6b797fc3981:42321 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ca8003516a2546e18e7a7c6db490d8a0 2024-12-06T08:17:51,361 DEBUG [M:0;b6b797fc3981:42321 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/907cf692383147468b3db40e51a55045 is 69, key is b6b797fc3981,41981,1733472972440/rs:state/1733472972679/Put/seqid=0 2024-12-06T08:17:51,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741880_1056 (size=5156) 2024-12-06T08:17:51,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741880_1056 (size=5156) 2024-12-06T08:17:51,367 INFO [M:0;b6b797fc3981:42321 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=164 (bloomFilter=true), 
to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/907cf692383147468b3db40e51a55045 2024-12-06T08:17:51,386 DEBUG [M:0;b6b797fc3981:42321 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2638866813b14a958ddf424bfa1735c0 is 52, key is load_balancer_on/state:d/1733472973771/Put/seqid=0 2024-12-06T08:17:51,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741881_1057 (size=5056) 2024-12-06T08:17:51,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741881_1057 (size=5056) 2024-12-06T08:17:51,392 INFO [M:0;b6b797fc3981:42321 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2638866813b14a958ddf424bfa1735c0 2024-12-06T08:17:51,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:17:51,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41981-0x100666627430001, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:17:51,394 INFO [RS:0;b6b797fc3981:41981 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,41981,1733472972440; zookeeper connection closed. 
2024-12-06T08:17:51,394 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@68897002 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@68897002 2024-12-06T08:17:51,394 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-06T08:17:51,401 DEBUG [M:0;b6b797fc3981:42321 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0418497bce2a4479b6306758e43904db as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0418497bce2a4479b6306758e43904db 2024-12-06T08:17:51,405 INFO [M:0;b6b797fc3981:42321 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0418497bce2a4479b6306758e43904db, entries=8, sequenceid=164, filesize=5.5 K 2024-12-06T08:17:51,406 DEBUG [M:0;b6b797fc3981:42321 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ca8003516a2546e18e7a7c6db490d8a0 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ca8003516a2546e18e7a7c6db490d8a0 2024-12-06T08:17:51,410 INFO [M:0;b6b797fc3981:42321 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ca8003516a2546e18e7a7c6db490d8a0 2024-12-06T08:17:51,411 INFO [M:0;b6b797fc3981:42321 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ca8003516a2546e18e7a7c6db490d8a0, entries=18, sequenceid=164, filesize=7.1 K 2024-12-06T08:17:51,411 DEBUG [M:0;b6b797fc3981:42321 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/907cf692383147468b3db40e51a55045 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/907cf692383147468b3db40e51a55045 2024-12-06T08:17:51,415 INFO [M:0;b6b797fc3981:42321 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/907cf692383147468b3db40e51a55045, entries=1, sequenceid=164, filesize=5.0 K 2024-12-06T08:17:51,416 DEBUG [M:0;b6b797fc3981:42321 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2638866813b14a958ddf424bfa1735c0 as hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2638866813b14a958ddf424bfa1735c0 2024-12-06T08:17:51,421 INFO [M:0;b6b797fc3981:42321 {}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:35589/user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2638866813b14a958ddf424bfa1735c0, entries=1, sequenceid=164, filesize=4.9 K 2024-12-06T08:17:51,422 INFO [M:0;b6b797fc3981:42321 {}] regionserver.HRegion(3040): Finished flush of dataSize ~66.44 KB/68031, heapSize ~81.63 KB/83584, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=164, compaction requested=false 2024-12-06T08:17:51,423 INFO [M:0;b6b797fc3981:42321 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:17:51,423 DEBUG [M:0;b6b797fc3981:42321 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:17:51,424 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/ac2248fa-2b34-e084-b12d-792e5275a418/MasterData/WALs/b6b797fc3981,42321,1733472972396 2024-12-06T08:17:51,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33427 is added to blk_1073741830_1006 (size=79260) 2024-12-06T08:17:51,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741830_1006 (size=79260) 2024-12-06T08:17:51,426 INFO [M:0;b6b797fc3981:42321 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-06T08:17:51,426 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-06T08:17:51,426 INFO [M:0;b6b797fc3981:42321 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42321 2024-12-06T08:17:51,433 DEBUG [M:0;b6b797fc3981:42321 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b6b797fc3981,42321,1733472972396 already deleted, retry=false 2024-12-06T08:17:51,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:17:51,536 INFO [M:0;b6b797fc3981:42321 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,42321,1733472972396; zookeeper connection closed. 
2024-12-06T08:17:51,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42321-0x100666627430000, quorum=127.0.0.1:62431, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:17:51,538 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b425372{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:17:51,539 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2c86dd56{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:17:51,539 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:17:51,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@682fde91{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:17:51,539 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@339b423a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/hadoop.log.dir/,STOPPED} 2024-12-06T08:17:51,540 WARN [BP-294598891-172.17.0.2-1733472971637 heartbeating to localhost/127.0.0.1:35589 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:17:51,540 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:17:51,540 WARN [BP-294598891-172.17.0.2-1733472971637 heartbeating to localhost/127.0.0.1:35589 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-294598891-172.17.0.2-1733472971637 (Datanode Uuid 331a1894-778f-4031-ba25-24c4424b1549) service to localhost/127.0.0.1:35589 2024-12-06T08:17:51,540 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:17:51,541 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/cluster_5a143fe0-2dac-c275-e2ac-ebb977966522/dfs/data/data3/current/BP-294598891-172.17.0.2-1733472971637 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:17:51,541 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/cluster_5a143fe0-2dac-c275-e2ac-ebb977966522/dfs/data/data4/current/BP-294598891-172.17.0.2-1733472971637 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:17:51,542 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:17:51,543 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@325f9d5d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:17:51,544 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1df1886a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:17:51,544 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:17:51,544 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e98bd1c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:17:51,544 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34331c55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/hadoop.log.dir/,STOPPED} 2024-12-06T08:17:51,545 WARN [BP-294598891-172.17.0.2-1733472971637 heartbeating to localhost/127.0.0.1:35589 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-06T08:17:51,545 WARN [BP-294598891-172.17.0.2-1733472971637 heartbeating to localhost/127.0.0.1:35589 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-294598891-172.17.0.2-1733472971637 (Datanode Uuid 68e91242-1fe4-4e4e-86ef-b8bee2f0ebe0) service to localhost/127.0.0.1:35589 2024-12-06T08:17:51,545 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-06T08:17:51,545 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-06T08:17:51,546 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/cluster_5a143fe0-2dac-c275-e2ac-ebb977966522/dfs/data/data1/current/BP-294598891-172.17.0.2-1733472971637 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:17:51,546 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/cluster_5a143fe0-2dac-c275-e2ac-ebb977966522/dfs/data/data2/current/BP-294598891-172.17.0.2-1733472971637 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-06T08:17:51,546 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-06T08:17:51,552 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f3b8626{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:17:51,552 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21b074b4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-06T08:17:51,553 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-06T08:17:51,553 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7cef3938{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-06T08:17:51,553 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8418220{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/hadoop.log.dir/,STOPPED} 2024-12-06T08:17:51,560 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-06T08:17:51,588 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down 2024-12-06T08:17:51,596 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=124 (was 111) - Thread LEAK? -, OpenFileDescriptor=487 (was 465) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=124 (was 110) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7801 (was 7867) 2024-12-06T08:17:51,603 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=125, OpenFileDescriptor=487, MaxFileDescriptor=1048576, SystemLoadAverage=124, ProcessCount=11, AvailableMemoryMB=7800 2024-12-06T08:17:51,603 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-06T08:17:51,603 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/hadoop.log.dir so I do NOT create it in target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61 2024-12-06T08:17:51,603 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b1a63a61-1a5d-0e22-f14a-6aa0c2e2da19/hadoop.tmp.dir so I do NOT create it in target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61 2024-12-06T08:17:51,604 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/cluster_0eb57583-0ed3-580c-c65a-f4461b491772, deleteOnExit=true 2024-12-06T08:17:51,604 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-06T08:17:51,604 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/test.cache.data in system properties and HBase conf 2024-12-06T08:17:51,604 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/hadoop.tmp.dir in system properties and HBase conf 2024-12-06T08:17:51,604 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/hadoop.log.dir in system properties and HBase conf 2024-12-06T08:17:51,604 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-06T08:17:51,604 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-06T08:17:51,604 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-06T08:17:51,604 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/nfs.dump.dir in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/java.io.tmpdir in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-06T08:17:51,605 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-06T08:17:51,618 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T08:17:51,706 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:17:51,710 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:17:51,715 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:17:51,715 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:17:51,716 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-06T08:17:51,716 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:17:51,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23eb3448{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:17:51,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@677fba52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:17:51,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@313083fb{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/java.io.tmpdir/jetty-localhost-41913-hadoop-hdfs-3_4_1-tests_jar-_-any-14163724139017892244/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-06T08:17:51,847 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@292559c2{HTTP/1.1, (http/1.1)}{localhost:41913} 2024-12-06T08:17:51,847 INFO [Time-limited test {}] server.Server(415): Started @386230ms 2024-12-06T08:17:51,860 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-06T08:17:51,921 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-06T08:17:51,924 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-06T08:17:51,924 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-06T08:17:51,924 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-06T08:17:51,925 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-06T08:17:51,925 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@395f3638{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/hadoop.log.dir/,AVAILABLE}
2024-12-06T08:17:51,925 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4884e0ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-06T08:17:51,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	...
11 more 2024-12-06T08:17:52,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3adf0e31{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/java.io.tmpdir/jetty-localhost-45079-hadoop-hdfs-3_4_1-tests_jar-_-any-5059568439210505608/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:17:52,039 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14b5bdc3{HTTP/1.1, (http/1.1)}{localhost:45079} 2024-12-06T08:17:52,039 INFO [Time-limited test {}] server.Server(415): Started @386421ms 2024-12-06T08:17:52,040 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-06T08:17:52,069 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-06T08:17:52,071 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-06T08:17:52,072 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-06T08:17:52,072 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-06T08:17:52,072 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-06T08:17:52,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f69da86{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/hadoop.log.dir/,AVAILABLE} 2024-12-06T08:17:52,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29c8b4e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-06T08:17:52,129 WARN [Thread-2196 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/cluster_0eb57583-0ed3-580c-c65a-f4461b491772/dfs/data/data1/current/BP-324744788-172.17.0.2-1733473071641/current, will proceed with Du for space computation calculation, 2024-12-06T08:17:52,129 WARN [Thread-2197 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/cluster_0eb57583-0ed3-580c-c65a-f4461b491772/dfs/data/data2/current/BP-324744788-172.17.0.2-1733473071641/current, will proceed with Du for space computation calculation, 2024-12-06T08:17:52,154 WARN [Thread-2175 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-06T08:17:52,156 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe91514f63e2be1f1 with lease ID 0x3337dc5216efaad4: Processing first storage report for DS-a5197787-5deb-4585-82a9-f5d6dccf856d from datanode DatanodeRegistration(127.0.0.1:34523, datanodeUuid=b17fd63d-39a3-448f-8ca6-1f8ba7ccfaf5, infoPort=40541, infoSecurePort=0, ipcPort=40283, storageInfo=lv=-57;cid=testClusterID;nsid=108018909;c=1733473071641) 2024-12-06T08:17:52,156 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe91514f63e2be1f1 with lease ID 0x3337dc5216efaad4: from storage DS-a5197787-5deb-4585-82a9-f5d6dccf856d node DatanodeRegistration(127.0.0.1:34523, datanodeUuid=b17fd63d-39a3-448f-8ca6-1f8ba7ccfaf5, infoPort=40541, infoSecurePort=0, ipcPort=40283, storageInfo=lv=-57;cid=testClusterID;nsid=108018909;c=1733473071641), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:17:52,156 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe91514f63e2be1f1 with lease ID 0x3337dc5216efaad4: Processing first storage report for DS-4eac9a8c-fb9d-47f0-a68c-3f8bc5a4a6bf from datanode DatanodeRegistration(127.0.0.1:34523, datanodeUuid=b17fd63d-39a3-448f-8ca6-1f8ba7ccfaf5, infoPort=40541, infoSecurePort=0, ipcPort=40283, storageInfo=lv=-57;cid=testClusterID;nsid=108018909;c=1733473071641) 2024-12-06T08:17:52,156 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe91514f63e2be1f1 with lease ID 0x3337dc5216efaad4: from storage DS-4eac9a8c-fb9d-47f0-a68c-3f8bc5a4a6bf node DatanodeRegistration(127.0.0.1:34523, datanodeUuid=b17fd63d-39a3-448f-8ca6-1f8ba7ccfaf5, infoPort=40541, infoSecurePort=0, ipcPort=40283, storageInfo=lv=-57;cid=testClusterID;nsid=108018909;c=1733473071641), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:17:52,194 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2484b6f7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/java.io.tmpdir/jetty-localhost-40463-hadoop-hdfs-3_4_1-tests_jar-_-any-5815439947455194330/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-06T08:17:52,195 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@228c056b{HTTP/1.1, (http/1.1)}{localhost:40463} 2024-12-06T08:17:52,195 INFO [Time-limited test {}] server.Server(415): Started @386578ms 2024-12-06T08:17:52,196 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-06T08:17:52,285 WARN [Thread-2223 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/cluster_0eb57583-0ed3-580c-c65a-f4461b491772/dfs/data/data4/current/BP-324744788-172.17.0.2-1733473071641/current, will proceed with Du for space computation calculation, 2024-12-06T08:17:52,285 WARN [Thread-2222 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/cluster_0eb57583-0ed3-580c-c65a-f4461b491772/dfs/data/data3/current/BP-324744788-172.17.0.2-1733473071641/current, will proceed with Du for space computation calculation, 2024-12-06T08:17:52,305 WARN [Thread-2211 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-06T08:17:52,307 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb190a21e4f388d05 with lease ID 0x3337dc5216efaad5: Processing first storage report for DS-1e3123c1-74b6-4938-8378-5377d52ccfe9 from datanode DatanodeRegistration(127.0.0.1:44633, datanodeUuid=9357696b-2237-42b8-9c52-e9bb7d16e4a1, infoPort=43961, infoSecurePort=0, ipcPort=43567, storageInfo=lv=-57;cid=testClusterID;nsid=108018909;c=1733473071641) 2024-12-06T08:17:52,307 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb190a21e4f388d05 with lease ID 0x3337dc5216efaad5: from storage DS-1e3123c1-74b6-4938-8378-5377d52ccfe9 node DatanodeRegistration(127.0.0.1:44633, datanodeUuid=9357696b-2237-42b8-9c52-e9bb7d16e4a1, infoPort=43961, infoSecurePort=0, ipcPort=43567, storageInfo=lv=-57;cid=testClusterID;nsid=108018909;c=1733473071641), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:17:52,307 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb190a21e4f388d05 with lease ID 0x3337dc5216efaad5: Processing first storage report for DS-0aeb1c47-b4f9-41c7-9870-5eb7d3474a59 from datanode DatanodeRegistration(127.0.0.1:44633, datanodeUuid=9357696b-2237-42b8-9c52-e9bb7d16e4a1, infoPort=43961, infoSecurePort=0, ipcPort=43567, storageInfo=lv=-57;cid=testClusterID;nsid=108018909;c=1733473071641) 2024-12-06T08:17:52,307 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb190a21e4f388d05 with lease ID 0x3337dc5216efaad5: from storage DS-0aeb1c47-b4f9-41c7-9870-5eb7d3474a59 node DatanodeRegistration(127.0.0.1:44633, datanodeUuid=9357696b-2237-42b8-9c52-e9bb7d16e4a1, infoPort=43961, infoSecurePort=0, ipcPort=43567, storageInfo=lv=-57;cid=testClusterID;nsid=108018909;c=1733473071641), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-06T08:17:52,322 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61 2024-12-06T08:17:52,326 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/cluster_0eb57583-0ed3-580c-c65a-f4461b491772/zookeeper_0, clientPort=65050, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/cluster_0eb57583-0ed3-580c-c65a-f4461b491772/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/cluster_0eb57583-0ed3-580c-c65a-f4461b491772/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-06T08:17:52,327 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=65050 2024-12-06T08:17:52,327 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:17:52,328 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:17:52,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:17:52,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741825_1001 (size=7) 2024-12-06T08:17:52,338 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4 with version=8 2024-12-06T08:17:52,338 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:39643/user/jenkins/test-data/4243083a-22a4-640c-8314-d5eed967ad24/hbase-staging 2024-12-06T08:17:52,340 INFO [Time-limited test {}] client.ConnectionUtils(129): master/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:17:52,340 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:17:52,341 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:17:52,341 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:17:52,341 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:17:52,341 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:17:52,341 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:17:52,341 INFO [Time-limited 
test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:17:52,342 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33903 2024-12-06T08:17:52,342 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:17:52,343 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:17:52,346 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33903 connecting to ZooKeeper ensemble=127.0.0.1:65050 2024-12-06T08:17:52,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:339030x0, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:17:52,354 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33903-0x1006667adaa0000 connected 2024-12-06T08:17:52,375 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:17:52,376 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:17:52,376 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:17:52,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33903 2024-12-06T08:17:52,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33903 2024-12-06T08:17:52,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33903 2024-12-06T08:17:52,384 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33903 2024-12-06T08:17:52,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33903 2024-12-06T08:17:52,386 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4, hbase.cluster.distributed=false 2024-12-06T08:17:52,402 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/b6b797fc3981:0 server-side Connection retries=45 2024-12-06T08:17:52,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:17:52,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-06T08:17:52,402 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 
writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-06T08:17:52,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-06T08:17:52,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-06T08:17:52,402 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-06T08:17:52,403 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-06T08:17:52,403 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:40063 2024-12-06T08:17:52,404 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-06T08:17:52,406 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-06T08:17:52,407 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:17:52,408 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:17:52,410 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:40063 connecting to ZooKeeper ensemble=127.0.0.1:65050 2024-12-06T08:17:52,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:400630x0, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-06T08:17:52,413 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40063-0x1006667adaa0001 connected 2024-12-06T08:17:52,413 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:17:52,414 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:17:52,414 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-06T08:17:52,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40063 2024-12-06T08:17:52,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40063 2024-12-06T08:17:52,417 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40063 2024-12-06T08:17:52,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 
with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40063 2024-12-06T08:17:52,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40063 2024-12-06T08:17:52,421 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/b6b797fc3981,33903,1733473072340 2024-12-06T08:17:52,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:17:52,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:17:52,423 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b6b797fc3981,33903,1733473072340 2024-12-06T08:17:52,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:17:52,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:52,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-06T08:17:52,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:52,425 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:17:52,426 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b6b797fc3981,33903,1733473072340 from backup master directory 2024-12-06T08:17:52,426 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-06T08:17:52,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:17:52,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b6b797fc3981,33903,1733473072340 2024-12-06T08:17:52,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-06T08:17:52,427 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-06T08:17:52,427 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b6b797fc3981,33903,1733473072340 2024-12-06T08:17:52,433 DEBUG [M:0;b6b797fc3981:33903 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b6b797fc3981:33903 2024-12-06T08:17:52,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:17:52,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741826_1002 (size=42) 2024-12-06T08:17:52,439 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/hbase.id with ID: c1e61685-4322-4ae9-95a0-3c5ffd5ee2fb 2024-12-06T08:17:52,448 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:17:52,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:52,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:52,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:17:52,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741827_1003 (size=196) 2024-12-06T08:17:52,457 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
{NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-06T08:17:52,458 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-06T08:17:52,458 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:17:52,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:17:52,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741828_1004 (size=1189) 2024-12-06T08:17:52,466 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store 2024-12-06T08:17:52,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:17:52,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741829_1005 (size=34) 2024-12-06T08:17:52,475 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:17:52,475 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:17:52,475 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:17:52,475 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:17:52,475 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:17:52,475 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:17:52,475 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:17:52,475 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:17:52,476 WARN [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/.initializing 2024-12-06T08:17:52,476 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/WALs/b6b797fc3981,33903,1733473072340 2024-12-06T08:17:52,478 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C33903%2C1733473072340, suffix=, logDir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/WALs/b6b797fc3981,33903,1733473072340, archiveDir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/oldWALs, maxLogs=10 2024-12-06T08:17:52,479 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C33903%2C1733473072340.1733473072479 2024-12-06T08:17:52,488 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/WALs/b6b797fc3981,33903,1733473072340/b6b797fc3981%2C33903%2C1733473072340.1733473072479 2024-12-06T08:17:52,488 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40541:40541),(127.0.0.1/127.0.0.1:43961:43961)] 2024-12-06T08:17:52,488 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:17:52,489 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:17:52,489 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:17:52,489 DEBUG [master/b6b797fc3981:0:becomeActiveMaster 
{}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:17:52,490 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:17:52,492 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-06T08:17:52,492 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:17:52,492 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:17:52,492 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:17:52,494 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-06T08:17:52,494 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:17:52,494 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:17:52,494 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:17:52,495 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-06T08:17:52,496 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:17:52,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:17:52,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:17:52,497 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-06T08:17:52,497 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:17:52,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:17:52,498 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:17:52,499 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:17:52,500 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-06T08:17:52,501 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-06T08:17:52,504 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:17:52,505 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=784449, jitterRate=-0.0025224238634109497}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-06T08:17:52,505 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-06T08:17:52,508 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-06T08:17:52,510 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a98ac83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:17:52,511 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-06T08:17:52,511 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-06T08:17:52,511 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-06T08:17:52,511 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 
2024-12-06T08:17:52,512 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-12-06T08:17:52,512 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-12-06T08:17:52,512 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-06T08:17:52,514 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-06T08:17:52,515 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-06T08:17:52,516 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-06T08:17:52,516 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-06T08:17:52,517 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-06T08:17:52,518 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-06T08:17:52,518 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-06T08:17:52,519 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-06T08:17:52,520 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-06T08:17:52,521 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-06T08:17:52,522 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-06T08:17:52,524 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-06T08:17:52,525 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-06T08:17:52,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-06T08:17:52,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-06T08:17:52,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:52,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:52,527 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=b6b797fc3981,33903,1733473072340, sessionid=0x1006667adaa0000, setting cluster-up flag (Was=false) 2024-12-06T08:17:52,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:52,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:52,535 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-06T08:17:52,536 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,33903,1733473072340 2024-12-06T08:17:52,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:52,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:52,544 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-06T08:17:52,544 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b6b797fc3981,33903,1733473072340 2024-12-06T08:17:52,546 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-06T08:17:52,546 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-06T08:17:52,547 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, 
RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-06T08:17:52,547 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b6b797fc3981,33903,1733473072340 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-06T08:17:52,547 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:17:52,547 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:17:52,547 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:17:52,547 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b6b797fc3981:0, corePoolSize=5, maxPoolSize=5 2024-12-06T08:17:52,547 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b6b797fc3981:0, corePoolSize=10, maxPoolSize=10 2024-12-06T08:17:52,547 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:17:52,547 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:17:52,547 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:17:52,548 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733473102548 2024-12-06T08:17:52,548 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-06T08:17:52,548 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:17:52,549 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-06T08:17:52,549 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-06T08:17:52,549 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-06T08:17:52,549 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-06T08:17:52,549 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-06T08:17:52,549 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-06T08:17:52,549 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:52,549 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-06T08:17:52,549 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-06T08:17:52,549 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-06T08:17:52,549 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:17:52,550 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-06T08:17:52,550 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-06T08:17:52,550 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:17:52,550 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733473072550,5,FailOnTimeoutGroup] 2024-12-06T08:17:52,550 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733473072550,5,FailOnTimeoutGroup] 2024-12-06T08:17:52,550 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:52,550 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-06T08:17:52,550 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:52,550 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:52,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:17:52,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741831_1007 (size=1039) 2024-12-06T08:17:52,556 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-06T08:17:52,556 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4 2024-12-06T08:17:52,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:17:52,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741832_1008 (size=32) 2024-12-06T08:17:52,562 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:17:52,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:17:52,564 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:17:52,564 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:17:52,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:17:52,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:17:52,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:17:52,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:17:52,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:17:52,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:17:52,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:17:52,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:17:52,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:17:52,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740 2024-12-06T08:17:52,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740 2024-12-06T08:17:52,570 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:17:52,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:17:52,573 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:17:52,574 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849134, jitterRate=0.07973062992095947}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:17:52,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:17:52,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:17:52,574 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:17:52,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:17:52,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:17:52,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:17:52,574 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T08:17:52,574 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:17:52,575 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-06T08:17:52,575 INFO [PEWorker-1 
{}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-06T08:17:52,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-06T08:17:52,576 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-06T08:17:52,577 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-06T08:17:52,632 DEBUG [RS:0;b6b797fc3981:40063 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b6b797fc3981:40063 2024-12-06T08:17:52,633 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(1008): ClusterId : c1e61685-4322-4ae9-95a0-3c5ffd5ee2fb 2024-12-06T08:17:52,633 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-06T08:17:52,635 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-06T08:17:52,635 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-06T08:17:52,638 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-06T08:17:52,638 DEBUG [RS:0;b6b797fc3981:40063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6078499b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:17:52,638 DEBUG [RS:0;b6b797fc3981:40063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14e4a377, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:17:52,638 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-06T08:17:52,638 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-06T08:17:52,638 DEBUG [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-06T08:17:52,639 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(3073): reportForDuty to master=b6b797fc3981,33903,1733473072340 with isa=b6b797fc3981/172.17.0.2:40063, startcode=1733473072401 2024-12-06T08:17:52,639 DEBUG [RS:0;b6b797fc3981:40063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-06T08:17:52,641 INFO [RS-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40083, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-06T08:17:52,641 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33903 {}] master.ServerManager(332): Checking decommissioned status of RegionServer b6b797fc3981,40063,1733473072401 2024-12-06T08:17:52,641 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33903 {}] master.ServerManager(486): Registering regionserver=b6b797fc3981,40063,1733473072401 2024-12-06T08:17:52,642 DEBUG [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4 2024-12-06T08:17:52,642 DEBUG [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:42947 2024-12-06T08:17:52,642 DEBUG [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-06T08:17:52,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:17:52,644 DEBUG [RS:0;b6b797fc3981:40063 {}] zookeeper.ZKUtil(111): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b6b797fc3981,40063,1733473072401 2024-12-06T08:17:52,644 WARN [RS:0;b6b797fc3981:40063 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-06T08:17:52,645 INFO [RS:0;b6b797fc3981:40063 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:17:52,645 DEBUG [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/b6b797fc3981,40063,1733473072401 2024-12-06T08:17:52,645 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b6b797fc3981,40063,1733473072401] 2024-12-06T08:17:52,648 DEBUG [RS:0;b6b797fc3981:40063 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-06T08:17:52,648 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-06T08:17:52,649 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-06T08:17:52,650 INFO [RS:0;b6b797fc3981:40063 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-06T08:17:52,650 INFO [RS:0;b6b797fc3981:40063 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:52,650 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-06T08:17:52,651 INFO [RS:0;b6b797fc3981:40063 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b6b797fc3981:0, corePoolSize=2, maxPoolSize=2 2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b6b797fc3981:0, corePoolSize=1, maxPoolSize=1 2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:17:52,651 DEBUG [RS:0;b6b797fc3981:40063 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b6b797fc3981:0, corePoolSize=3, maxPoolSize=3 2024-12-06T08:17:52,652 INFO [RS:0;b6b797fc3981:40063 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:52,652 INFO [RS:0;b6b797fc3981:40063 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:52,652 INFO [RS:0;b6b797fc3981:40063 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:52,652 INFO [RS:0;b6b797fc3981:40063 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:52,652 INFO [RS:0;b6b797fc3981:40063 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,40063,1733473072401-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-06T08:17:52,670 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-06T08:17:52,670 INFO [RS:0;b6b797fc3981:40063 {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,40063,1733473072401-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:52,684 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.Replication(204): b6b797fc3981,40063,1733473072401 started 2024-12-06T08:17:52,684 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(1767): Serving as b6b797fc3981,40063,1733473072401, RpcServer on b6b797fc3981/172.17.0.2:40063, sessionid=0x1006667adaa0001 2024-12-06T08:17:52,685 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-06T08:17:52,685 DEBUG [RS:0;b6b797fc3981:40063 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b6b797fc3981,40063,1733473072401 2024-12-06T08:17:52,685 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,40063,1733473072401' 2024-12-06T08:17:52,685 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-06T08:17:52,685 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-06T08:17:52,685 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-06T08:17:52,685 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-06T08:17:52,685 DEBUG [RS:0;b6b797fc3981:40063 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b6b797fc3981,40063,1733473072401 2024-12-06T08:17:52,685 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b6b797fc3981,40063,1733473072401' 2024-12-06T08:17:52,685 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-06T08:17:52,686 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-06T08:17:52,686 DEBUG [RS:0;b6b797fc3981:40063 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-06T08:17:52,686 INFO [RS:0;b6b797fc3981:40063 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-06T08:17:52,686 INFO [RS:0;b6b797fc3981:40063 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-06T08:17:52,694 INFO [regionserver/b6b797fc3981:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:17:52,727 WARN [b6b797fc3981:33903 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-06T08:17:52,788 INFO [RS:0;b6b797fc3981:40063 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C40063%2C1733473072401, suffix=, logDir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/b6b797fc3981,40063,1733473072401, archiveDir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/oldWALs, maxLogs=32 2024-12-06T08:17:52,789 INFO [RS:0;b6b797fc3981:40063 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C40063%2C1733473072401.1733473072789 2024-12-06T08:17:52,794 INFO [RS:0;b6b797fc3981:40063 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/b6b797fc3981,40063,1733473072401/b6b797fc3981%2C40063%2C1733473072401.1733473072789 2024-12-06T08:17:52,794 DEBUG [RS:0;b6b797fc3981:40063 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40541:40541),(127.0.0.1/127.0.0.1:43961:43961)] 2024-12-06T08:17:52,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-06T08:17:52,977 DEBUG [b6b797fc3981:33903 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-06T08:17:52,978 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b6b797fc3981,40063,1733473072401 2024-12-06T08:17:52,979 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,40063,1733473072401, state=OPENING 2024-12-06T08:17:52,980 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-06T08:17:52,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:52,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:52,983 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:17:52,983 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:17:52,983 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=b6b797fc3981,40063,1733473072401}] 2024-12-06T08:17:53,135 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to b6b797fc3981,40063,1733473072401 2024-12-06T08:17:53,135 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-06T08:17:53,137 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45724, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-06T08:17:53,140 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-06T08:17:53,140 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:17:53,142 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b6b797fc3981%2C40063%2C1733473072401.meta, suffix=.meta, logDir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/b6b797fc3981,40063,1733473072401, archiveDir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/oldWALs, maxLogs=32 2024-12-06T08:17:53,142 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b6b797fc3981%2C40063%2C1733473072401.meta.1733473073142.meta 2024-12-06T08:17:53,147 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL 
/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/b6b797fc3981,40063,1733473072401/b6b797fc3981%2C40063%2C1733473072401.meta.1733473073142.meta 2024-12-06T08:17:53,147 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40541:40541),(127.0.0.1/127.0.0.1:43961:43961)] 2024-12-06T08:17:53,148 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:17:53,148 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-06T08:17:53,148 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-06T08:17:53,148 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-06T08:17:53,148 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-06T08:17:53,148 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:17:53,148 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-06T08:17:53,149 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-06T08:17:53,150 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-06T08:17:53,151 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-06T08:17:53,151 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-06T08:17:53,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:17:53,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-06T08:17:53,152 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-06T08:17:53,152 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:17:53,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:17:53,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-06T08:17:53,153 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-06T08:17:53,153 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:17:53,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-06T08:17:53,154 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) 
under hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740 2024-12-06T08:17:53,155 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740 2024-12-06T08:17:53,157 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-06T08:17:53,158 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-06T08:17:53,159 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830160, jitterRate=0.05560345947742462}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-06T08:17:53,159 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-06T08:17:53,159 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733473073135 2024-12-06T08:17:53,161 DEBUG [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-06T08:17:53,161 INFO [RS_OPEN_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-06T08:17:53,162 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,40063,1733473072401 2024-12-06T08:17:53,162 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b6b797fc3981,40063,1733473072401, state=OPEN 2024-12-06T08:17:53,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:17:53,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-06T08:17:53,168 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:17:53,168 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-06T08:17:53,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-06T08:17:53,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, 
state=SUCCESS; OpenRegionProcedure 1588230740, server=b6b797fc3981,40063,1733473072401 in 185 msec 2024-12-06T08:17:53,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-06T08:17:53,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 595 msec 2024-12-06T08:17:53,173 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 627 msec 2024-12-06T08:17:53,173 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733473073173, completionTime=-1 2024-12-06T08:17:53,173 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-06T08:17:53,173 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-06T08:17:53,174 DEBUG [hconnection-0x4f8daa34-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:17:53,175 INFO [RS-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45738, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:17:53,176 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-06T08:17:53,176 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733473133176 2024-12-06T08:17:53,176 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733473193176 2024-12-06T08:17:53,176 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 2 msec 2024-12-06T08:17:53,181 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,33903,1733473072340-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:53,181 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,33903,1733473072340-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:53,181 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,33903,1733473072340-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:53,181 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b6b797fc3981:33903, period=300000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:53,181 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-06T08:17:53,181 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. 
Creating... 2024-12-06T08:17:53,181 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-06T08:17:53,182 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-06T08:17:53,182 DEBUG [master/b6b797fc3981:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-06T08:17:53,183 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-06T08:17:53,183 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:17:53,184 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-06T08:17:53,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:17:53,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741835_1011 (size=358) 2024-12-06T08:17:53,193 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d1498c51055d38c772cd526e782a834f, NAME => 'hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4 2024-12-06T08:17:53,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:17:53,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741836_1012 (size=42) 2024-12-06T08:17:53,200 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:17:53,200 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing d1498c51055d38c772cd526e782a834f, disabling compactions & flushes 2024-12-06T08:17:53,200 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region 
hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. 2024-12-06T08:17:53,200 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. 2024-12-06T08:17:53,200 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. after waiting 0 ms 2024-12-06T08:17:53,200 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. 2024-12-06T08:17:53,200 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. 2024-12-06T08:17:53,200 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for d1498c51055d38c772cd526e782a834f: 2024-12-06T08:17:53,201 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-06T08:17:53,202 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733473073201"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733473073201"}]},"ts":"1733473073201"} 2024-12-06T08:17:53,203 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-06T08:17:53,204 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-06T08:17:53,204 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473073204"}]},"ts":"1733473073204"} 2024-12-06T08:17:53,205 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-06T08:17:53,209 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d1498c51055d38c772cd526e782a834f, ASSIGN}] 2024-12-06T08:17:53,209 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d1498c51055d38c772cd526e782a834f, ASSIGN 2024-12-06T08:17:53,210 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=d1498c51055d38c772cd526e782a834f, ASSIGN; state=OFFLINE, location=b6b797fc3981,40063,1733473072401; forceNewPlan=false, retain=false 2024-12-06T08:17:53,361 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d1498c51055d38c772cd526e782a834f, regionState=OPENING, regionLocation=b6b797fc3981,40063,1733473072401 2024-12-06T08:17:53,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): 
Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure d1498c51055d38c772cd526e782a834f, server=b6b797fc3981,40063,1733473072401}] 2024-12-06T08:17:53,515 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to b6b797fc3981,40063,1733473072401 2024-12-06T08:17:53,518 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. 2024-12-06T08:17:53,518 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => d1498c51055d38c772cd526e782a834f, NAME => 'hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f.', STARTKEY => '', ENDKEY => ''} 2024-12-06T08:17:53,518 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace d1498c51055d38c772cd526e782a834f 2024-12-06T08:17:53,519 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-06T08:17:53,519 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for d1498c51055d38c772cd526e782a834f 2024-12-06T08:17:53,519 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for d1498c51055d38c772cd526e782a834f 2024-12-06T08:17:53,520 INFO [StoreOpener-d1498c51055d38c772cd526e782a834f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d1498c51055d38c772cd526e782a834f 2024-12-06T08:17:53,521 INFO [StoreOpener-d1498c51055d38c772cd526e782a834f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d1498c51055d38c772cd526e782a834f columnFamilyName info 2024-12-06T08:17:53,521 DEBUG [StoreOpener-d1498c51055d38c772cd526e782a834f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-06T08:17:53,521 INFO [StoreOpener-d1498c51055d38c772cd526e782a834f-1 {}] regionserver.HStore(327): Store=d1498c51055d38c772cd526e782a834f/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-06T08:17:53,522 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/namespace/d1498c51055d38c772cd526e782a834f 2024-12-06T08:17:53,522 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/namespace/d1498c51055d38c772cd526e782a834f 2024-12-06T08:17:53,524 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for d1498c51055d38c772cd526e782a834f 2024-12-06T08:17:53,526 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/namespace/d1498c51055d38c772cd526e782a834f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-06T08:17:53,526 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened d1498c51055d38c772cd526e782a834f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=789916, jitterRate=0.004430666565895081}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-06T08:17:53,527 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for d1498c51055d38c772cd526e782a834f: 2024-12-06T08:17:53,527 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f., pid=6, masterSystemTime=1733473073515 2024-12-06T08:17:53,529 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. 2024-12-06T08:17:53,529 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. 
2024-12-06T08:17:53,529 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d1498c51055d38c772cd526e782a834f, regionState=OPEN, openSeqNum=2, regionLocation=b6b797fc3981,40063,1733473072401 2024-12-06T08:17:53,532 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-06T08:17:53,532 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure d1498c51055d38c772cd526e782a834f, server=b6b797fc3981,40063,1733473072401 in 168 msec 2024-12-06T08:17:53,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-06T08:17:53,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=d1498c51055d38c772cd526e782a834f, ASSIGN in 324 msec 2024-12-06T08:17:53,534 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-06T08:17:53,534 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733473073534"}]},"ts":"1733473073534"} 2024-12-06T08:17:53,535 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-06T08:17:53,537 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-06T08:17:53,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 356 msec 2024-12-06T08:17:53,583 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-06T08:17:53,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:17:53,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:53,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:53,589 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-06T08:17:53,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:17:53,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 9 msec 2024-12-06T08:17:53,601 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-06T08:17:53,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-06T08:17:53,610 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 9 msec 2024-12-06T08:17:53,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-06T08:17:53,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-06T08:17:53,618 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.191sec 2024-12-06T08:17:53,618 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-06T08:17:53,618 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-06T08:17:53,618 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-06T08:17:53,618 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-06T08:17:53,618 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-06T08:17:53,618 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,33903,1733473072340-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-06T08:17:53,618 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,33903,1733473072340-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-06T08:17:53,620 DEBUG [master/b6b797fc3981:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-06T08:17:53,620 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-06T08:17:53,620 INFO [master/b6b797fc3981:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b6b797fc3981,33903,1733473072340-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-06T08:17:53,622 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0b341545 to 127.0.0.1:65050 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@35fff8c 2024-12-06T08:17:53,626 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@628d8560, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-06T08:17:53,628 DEBUG [hconnection-0x76350cd6-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-06T08:17:53,629 INFO [RS-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45746, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-06T08:17:53,630 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=b6b797fc3981,33903,1733473072340 2024-12-06T08:17:53,631 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-06T08:17:53,633 INFO [Time-limited test {}] master.MasterRpcServices(506): Client=null/null set balanceSwitch=false 2024-12-06T08:17:53,633 INFO [Time-limited test {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-06T08:17:53,634 INFO [Time-limited test {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/test.com,8080,1, archiveDir=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/oldWALs, maxLogs=32 2024-12-06T08:17:53,635 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733473073635 2024-12-06T08:17:53,639 INFO [Time-limited test {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/test.com,8080,1/test.com%2C8080%2C1.1733473073635 2024-12-06T08:17:53,639 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43961:43961),(127.0.0.1/127.0.0.1:40541:40541)] 2024-12-06T08:17:53,640 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733473073639 2024-12-06T08:17:53,645 INFO [Time-limited test {}] wal.AbstractFSWAL(837): Rolled WAL /user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/test.com,8080,1/test.com%2C8080%2C1.1733473073635 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/test.com,8080,1/test.com%2C8080%2C1.1733473073639 2024-12-06T08:17:53,645 DEBUG [Time-limited test {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43961:43961),(127.0.0.1/127.0.0.1:40541:40541)] 2024-12-06T08:17:53,645 DEBUG [Time-limited test {}] wal.AbstractFSWAL(751): hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/test.com,8080,1/test.com%2C8080%2C1.1733473073635 is not closed yet, will try archiving it next time 2024-12-06T08:17:53,646 DEBUG [WAL-Shutdown-0 {}] 
wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/test.com,8080,1 2024-12-06T08:17:53,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741837_1013 (size=93) 2024-12-06T08:17:53,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741837_1013 (size=93) 2024-12-06T08:17:53,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741838_1014 (size=93) 2024-12-06T08:17:53,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741838_1014 (size=93) 2024-12-06T08:17:53,648 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(818): Archiving hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/test.com,8080,1/test.com%2C8080%2C1.1733473073635 to hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/oldWALs/test.com%2C8080%2C1.1733473073635 2024-12-06T08:17:53,650 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/oldWALs 2024-12-06T08:17:53,650 INFO [Time-limited test {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733473073639) 2024-12-06T08:17:53,651 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-06T08:17:53,651 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0b341545 to 127.0.0.1:65050 2024-12-06T08:17:53,651 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:17:53,651 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-06T08:17:53,651 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1500974240, stopped=false 2024-12-06T08:17:53,651 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=b6b797fc3981,33903,1733473072340 2024-12-06T08:17:53,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:17:53,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-06T08:17:53,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:53,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:53,654 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-06T08:17:53,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 
2024-12-06T08:17:53,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-06T08:17:53,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:17:53,654 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,40063,1733473072401' ***** 2024-12-06T08:17:53,654 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-06T08:17:53,655 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-06T08:17:53,655 INFO [RS:0;b6b797fc3981:40063 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-06T08:17:53,655 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-06T08:17:53,655 INFO [RS:0;b6b797fc3981:40063 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-06T08:17:53,655 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(3579): Received CLOSE for d1498c51055d38c772cd526e782a834f 2024-12-06T08:17:53,655 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,40063,1733473072401 2024-12-06T08:17:53,656 DEBUG [RS:0;b6b797fc3981:40063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:17:53,656 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-06T08:17:53,656 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-06T08:17:53,656 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing d1498c51055d38c772cd526e782a834f, disabling compactions & flushes 2024-12-06T08:17:53,656 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-06T08:17:53,656 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. 2024-12-06T08:17:53,656 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-06T08:17:53,656 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. 2024-12-06T08:17:53,656 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. after waiting 0 ms 2024-12-06T08:17:53,656 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. 
2024-12-06T08:17:53,656 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing d1498c51055d38c772cd526e782a834f 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-06T08:17:53,656 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-06T08:17:53,656 DEBUG [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, d1498c51055d38c772cd526e782a834f=hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f.} 2024-12-06T08:17:53,656 DEBUG [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, d1498c51055d38c772cd526e782a834f 2024-12-06T08:17:53,656 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-06T08:17:53,656 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-06T08:17:53,656 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-06T08:17:53,656 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-06T08:17:53,656 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-06T08:17:53,656 INFO [regionserver/b6b797fc3981:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-06T08:17:53,656 INFO [regionserver/b6b797fc3981:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-06T08:17:53,656 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=1.23 KB heapSize=2.87 KB 2024-12-06T08:17:53,671 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740/.tmp/info/82b5b798930c4b5a89bd44494a62d514 is 143, key is hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f./info:regioninfo/1733473073529/Put/seqid=0 2024-12-06T08:17:53,672 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/namespace/d1498c51055d38c772cd526e782a834f/.tmp/info/ea82fdc7224549b88d14d0b764d95c92 is 45, key is default/info:d/1733473073593/Put/seqid=0 2024-12-06T08:17:53,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741840_1016 (size=5037) 2024-12-06T08:17:53,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741839_1015 (size=6595) 2024-12-06T08:17:53,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741840_1016 (size=5037) 2024-12-06T08:17:53,677 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741839_1015 (size=6595) 2024-12-06T08:17:53,678 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/namespace/d1498c51055d38c772cd526e782a834f/.tmp/info/ea82fdc7224549b88d14d0b764d95c92 2024-12-06T08:17:53,678 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.14 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740/.tmp/info/82b5b798930c4b5a89bd44494a62d514 2024-12-06T08:17:53,683 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/namespace/d1498c51055d38c772cd526e782a834f/.tmp/info/ea82fdc7224549b88d14d0b764d95c92 as hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/namespace/d1498c51055d38c772cd526e782a834f/info/ea82fdc7224549b88d14d0b764d95c92 2024-12-06T08:17:53,689 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/namespace/d1498c51055d38c772cd526e782a834f/info/ea82fdc7224549b88d14d0b764d95c92, entries=2, sequenceid=6, filesize=4.9 K 2024-12-06T08:17:53,690 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for d1498c51055d38c772cd526e782a834f in 34ms, sequenceid=6, compaction requested=false 2024-12-06T08:17:53,695 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/namespace/d1498c51055d38c772cd526e782a834f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-06T08:17:53,695 INFO [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. 2024-12-06T08:17:53,695 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for d1498c51055d38c772cd526e782a834f: 2024-12-06T08:17:53,695 DEBUG [RS_CLOSE_REGION-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733473073181.d1498c51055d38c772cd526e782a834f. 
2024-12-06T08:17:53,698 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740/.tmp/table/83bd4dc735414b9c9ed851d15b3408ad is 51, key is hbase:namespace/table:state/1733473073534/Put/seqid=0 2024-12-06T08:17:53,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741841_1017 (size=5242) 2024-12-06T08:17:53,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741841_1017 (size=5242) 2024-12-06T08:17:53,703 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=94 B at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740/.tmp/table/83bd4dc735414b9c9ed851d15b3408ad 2024-12-06T08:17:53,708 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740/.tmp/info/82b5b798930c4b5a89bd44494a62d514 as hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740/info/82b5b798930c4b5a89bd44494a62d514 2024-12-06T08:17:53,712 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740/info/82b5b798930c4b5a89bd44494a62d514, entries=10, sequenceid=9, filesize=6.4 K 2024-12-06T08:17:53,713 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740/.tmp/table/83bd4dc735414b9c9ed851d15b3408ad as hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740/table/83bd4dc735414b9c9ed851d15b3408ad 2024-12-06T08:17:53,717 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740/table/83bd4dc735414b9c9ed851d15b3408ad, entries=2, sequenceid=9, filesize=5.1 K 2024-12-06T08:17:53,718 INFO [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~1.23 KB/1264, heapSize ~2.59 KB/2648, currentSize=0 B/0 for 1588230740 in 62ms, sequenceid=9, compaction requested=false 2024-12-06T08:17:53,722 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/data/hbase/meta/1588230740/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-12-06T08:17:53,722 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-06T08:17:53,723 INFO 
[RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-06T08:17:53,723 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-06T08:17:53,723 DEBUG [RS_CLOSE_META-regionserver/b6b797fc3981:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-06T08:17:53,856 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,40063,1733473072401; all regions closed. 2024-12-06T08:17:53,857 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/b6b797fc3981,40063,1733473072401 2024-12-06T08:17:53,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741834_1010 (size=2484) 2024-12-06T08:17:53,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741834_1010 (size=2484) 2024-12-06T08:17:53,861 DEBUG [RS:0;b6b797fc3981:40063 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/oldWALs 2024-12-06T08:17:53,861 INFO [RS:0;b6b797fc3981:40063 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b6b797fc3981%2C40063%2C1733473072401.meta:.meta(num 1733473073142) 2024-12-06T08:17:53,861 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/WALs/b6b797fc3981,40063,1733473072401 2024-12-06T08:17:53,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741833_1009 (size=1414) 2024-12-06T08:17:53,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741833_1009 (size=1414) 2024-12-06T08:17:53,865 DEBUG [RS:0;b6b797fc3981:40063 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/oldWALs 2024-12-06T08:17:53,865 INFO [RS:0;b6b797fc3981:40063 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog b6b797fc3981%2C40063%2C1733473072401:(num 1733473072789) 2024-12-06T08:17:53,865 DEBUG [RS:0;b6b797fc3981:40063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:17:53,865 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.LeaseManager(133): Closed leases 2024-12-06T08:17:53,865 INFO [RS:0;b6b797fc3981:40063 {}] hbase.ChoreService(370): Chore service for: regionserver/b6b797fc3981:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-06T08:17:53,865 INFO [regionserver/b6b797fc3981:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-06T08:17:53,865 INFO [RS:0;b6b797fc3981:40063 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:40063 2024-12-06T08:17:53,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b6b797fc3981,40063,1733473072401 2024-12-06T08:17:53,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-06T08:17:53,869 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b6b797fc3981,40063,1733473072401] 2024-12-06T08:17:53,869 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing b6b797fc3981,40063,1733473072401; numProcessing=1 2024-12-06T08:17:53,871 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/b6b797fc3981,40063,1733473072401 already deleted, retry=false 2024-12-06T08:17:53,871 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; b6b797fc3981,40063,1733473072401 expired; onlineServers=0 2024-12-06T08:17:53,871 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'b6b797fc3981,33903,1733473072340' ***** 2024-12-06T08:17:53,871 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-06T08:17:53,871 DEBUG [M:0;b6b797fc3981:33903 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e0a9bb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b6b797fc3981/172.17.0.2:0 2024-12-06T08:17:53,871 INFO [M:0;b6b797fc3981:33903 {}] regionserver.HRegionServer(1224): stopping server b6b797fc3981,33903,1733473072340 2024-12-06T08:17:53,871 INFO [M:0;b6b797fc3981:33903 {}] regionserver.HRegionServer(1250): stopping server b6b797fc3981,33903,1733473072340; all regions closed. 2024-12-06T08:17:53,871 DEBUG [M:0;b6b797fc3981:33903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-06T08:17:53,871 DEBUG [M:0;b6b797fc3981:33903 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-06T08:17:53,871 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-06T08:17:53,871 DEBUG [M:0;b6b797fc3981:33903 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-06T08:17:53,871 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733473072550 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.large.0-1733473072550,5,FailOnTimeoutGroup] 2024-12-06T08:17:53,871 DEBUG [master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733473072550 {}] cleaner.HFileCleaner(306): Exit Thread[master/b6b797fc3981:0:becomeActiveMaster-HFileCleaner.small.0-1733473072550,5,FailOnTimeoutGroup] 2024-12-06T08:17:53,871 INFO [M:0;b6b797fc3981:33903 {}] hbase.ChoreService(370): Chore service for: master/b6b797fc3981:0 had [] on shutdown 2024-12-06T08:17:53,871 DEBUG [M:0;b6b797fc3981:33903 {}] master.HMaster(1733): Stopping service threads 2024-12-06T08:17:53,871 INFO [M:0;b6b797fc3981:33903 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-06T08:17:53,872 INFO [M:0;b6b797fc3981:33903 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-06T08:17:53,872 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-06T08:17:53,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-06T08:17:53,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-06T08:17:53,873 DEBUG [M:0;b6b797fc3981:33903 {}] zookeeper.ZKUtil(347): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-06T08:17:53,873 WARN [M:0;b6b797fc3981:33903 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-06T08:17:53,873 INFO [M:0;b6b797fc3981:33903 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-06T08:17:53,873 INFO [M:0;b6b797fc3981:33903 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-06T08:17:53,873 DEBUG [M:0;b6b797fc3981:33903 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-06T08:17:53,873 INFO [M:0;b6b797fc3981:33903 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:17:53,873 DEBUG [M:0;b6b797fc3981:33903 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-06T08:17:53,873 DEBUG [M:0;b6b797fc3981:33903 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-06T08:17:53,873 DEBUG [M:0;b6b797fc3981:33903 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-06T08:17:53,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-06T08:17:53,873 INFO [M:0;b6b797fc3981:33903 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=25.32 KB heapSize=32.31 KB 2024-12-06T08:17:53,889 DEBUG [M:0;b6b797fc3981:33903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c723f7cdc1b74595a00c3e569f781262 is 82, key is hbase:meta,,1/info:regioninfo/1733473073161/Put/seqid=0 2024-12-06T08:17:53,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741842_1018 (size=5672) 2024-12-06T08:17:53,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741842_1018 (size=5672) 2024-12-06T08:17:53,895 INFO [M:0;b6b797fc3981:33903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c723f7cdc1b74595a00c3e569f781262 2024-12-06T08:17:53,920 DEBUG [M:0;b6b797fc3981:33903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/16ae6cf3545d476db33bc84e51abfcbf is 696, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733473073538/Put/seqid=0 2024-12-06T08:17:53,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741843_1019 (size=6626) 2024-12-06T08:17:53,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741843_1019 (size=6626) 2024-12-06T08:17:53,926 INFO [M:0;b6b797fc3981:33903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.72 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/16ae6cf3545d476db33bc84e51abfcbf 2024-12-06T08:17:53,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44775/user/jenkins/test-data/afc51108-4879-5e8b-7394-1820edc8a40d/WALs/b6b797fc3981,44681,1733472803392/b6b797fc3981%2C44681%2C1733472803392.meta.1733472804629.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.closeWriter(FSHLog.java:464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.lambda$doReplaceWriter$0(FSHLog.java:402) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-06T08:17:53,953 DEBUG [M:0;b6b797fc3981:33903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1f37c163b2ac47f7ab33dc0287bc0acd is 69, key is b6b797fc3981,40063,1733473072401/rs:state/1733473072641/Put/seqid=0 2024-12-06T08:17:53,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741844_1020 (size=5156) 2024-12-06T08:17:53,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741844_1020 (size=5156) 2024-12-06T08:17:53,958 INFO [M:0;b6b797fc3981:33903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1f37c163b2ac47f7ab33dc0287bc0acd 2024-12-06T08:17:53,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:17:53,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40063-0x1006667adaa0001, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-06T08:17:53,969 INFO [RS:0;b6b797fc3981:40063 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,40063,1733473072401; zookeeper connection closed. 
2024-12-06T08:17:53,970 INFO  [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2cdd8bf6 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2cdd8bf6
2024-12-06T08:17:53,970 INFO  [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-06T08:17:53,978 DEBUG [M:0;b6b797fc3981:33903 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/67b0c924a99645e5b425d5a0587b9859 is 52, key is load_balancer_on/state:d/1733473073632/Put/seqid=0
2024-12-06T08:17:53,983 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741845_1021 (size=5056)
2024-12-06T08:17:53,983 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741845_1021 (size=5056)
2024-12-06T08:17:53,984 INFO  [M:0;b6b797fc3981:33903 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/67b0c924a99645e5b425d5a0587b9859
2024-12-06T08:17:53,989 DEBUG [M:0;b6b797fc3981:33903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c723f7cdc1b74595a00c3e569f781262 as hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c723f7cdc1b74595a00c3e569f781262
2024-12-06T08:17:53,994 INFO  [M:0;b6b797fc3981:33903 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c723f7cdc1b74595a00c3e569f781262, entries=8, sequenceid=70, filesize=5.5 K
2024-12-06T08:17:53,995 DEBUG [M:0;b6b797fc3981:33903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/16ae6cf3545d476db33bc84e51abfcbf as hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/16ae6cf3545d476db33bc84e51abfcbf
2024-12-06T08:17:53,999 INFO  [M:0;b6b797fc3981:33903 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/16ae6cf3545d476db33bc84e51abfcbf, entries=8, sequenceid=70, filesize=6.5 K
2024-12-06T08:17:54,000 DEBUG [M:0;b6b797fc3981:33903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1f37c163b2ac47f7ab33dc0287bc0acd as hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1f37c163b2ac47f7ab33dc0287bc0acd
2024-12-06T08:17:54,005 INFO  [M:0;b6b797fc3981:33903 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1f37c163b2ac47f7ab33dc0287bc0acd, entries=1, sequenceid=70, filesize=5.0 K
2024-12-06T08:17:54,006 DEBUG [M:0;b6b797fc3981:33903 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/67b0c924a99645e5b425d5a0587b9859 as hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/67b0c924a99645e5b425d5a0587b9859
2024-12-06T08:17:54,010 INFO  [M:0;b6b797fc3981:33903 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42947/user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/67b0c924a99645e5b425d5a0587b9859, entries=1, sequenceid=70, filesize=4.9 K
2024-12-06T08:17:54,011 INFO  [M:0;b6b797fc3981:33903 {}] regionserver.HRegion(3040): Finished flush of dataSize ~25.32 KB/25929, heapSize ~32.25 KB/33024, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=70, compaction requested=false
2024-12-06T08:17:54,012 INFO  [M:0;b6b797fc3981:33903 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-06T08:17:54,012 DEBUG [M:0;b6b797fc3981:33903 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-06T08:17:54,013 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/30146f15-4ba9-2a0c-a619-7f7308480da4/MasterData/WALs/b6b797fc3981,33903,1733473072340
2024-12-06T08:17:54,015 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44633 is added to blk_1073741830_1006 (size=31030)
2024-12-06T08:17:54,015 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34523 is added to blk_1073741830_1006 (size=31030)
2024-12-06T08:17:54,016 INFO  [M:0;b6b797fc3981:33903 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-06T08:17:54,016 INFO  [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-06T08:17:54,016 INFO  [M:0;b6b797fc3981:33903 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33903
2024-12-06T08:17:54,018 DEBUG [M:0;b6b797fc3981:33903 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/b6b797fc3981,33903,1733473072340 already deleted, retry=false
2024-12-06T08:17:54,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-06T08:17:54,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33903-0x1006667adaa0000, quorum=127.0.0.1:65050, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-06T08:17:54,120 INFO  [M:0;b6b797fc3981:33903 {}] regionserver.HRegionServer(1307): Exiting; stopping=b6b797fc3981,33903,1733473072340; zookeeper connection closed.
2024-12-06T08:17:54,123 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2484b6f7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-06T08:17:54,123 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@228c056b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-06T08:17:54,123 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-06T08:17:54,123 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29c8b4e0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-06T08:17:54,123 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f69da86{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/hadoop.log.dir/,STOPPED}
2024-12-06T08:17:54,125 WARN  [BP-324744788-172.17.0.2-1733473071641 heartbeating to localhost/127.0.0.1:42947 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-06T08:17:54,125 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-06T08:17:54,125 WARN  [BP-324744788-172.17.0.2-1733473071641 heartbeating to localhost/127.0.0.1:42947 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-324744788-172.17.0.2-1733473071641 (Datanode Uuid 9357696b-2237-42b8-9c52-e9bb7d16e4a1) service to localhost/127.0.0.1:42947
2024-12-06T08:17:54,125 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-06T08:17:54,125 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/cluster_0eb57583-0ed3-580c-c65a-f4461b491772/dfs/data/data3/current/BP-324744788-172.17.0.2-1733473071641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-06T08:17:54,126 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/cluster_0eb57583-0ed3-580c-c65a-f4461b491772/dfs/data/data4/current/BP-324744788-172.17.0.2-1733473071641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-06T08:17:54,126 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-06T08:17:54,128 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3adf0e31{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-06T08:17:54,128 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14b5bdc3{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-06T08:17:54,128 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-06T08:17:54,128 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4884e0ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-06T08:17:54,128 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@395f3638{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/hadoop.log.dir/,STOPPED}
2024-12-06T08:17:54,129 WARN  [BP-324744788-172.17.0.2-1733473071641 heartbeating to localhost/127.0.0.1:42947 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-06T08:17:54,129 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-06T08:17:54,129 WARN  [BP-324744788-172.17.0.2-1733473071641 heartbeating to localhost/127.0.0.1:42947 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-324744788-172.17.0.2-1733473071641 (Datanode Uuid b17fd63d-39a3-448f-8ca6-1f8ba7ccfaf5) service to localhost/127.0.0.1:42947
2024-12-06T08:17:54,129 WARN  [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-06T08:17:54,130 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/cluster_0eb57583-0ed3-580c-c65a-f4461b491772/dfs/data/data1/current/BP-324744788-172.17.0.2-1733473071641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-06T08:17:54,130 WARN  [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/cluster_0eb57583-0ed3-580c-c65a-f4461b491772/dfs/data/data2/current/BP-324744788-172.17.0.2-1733473071641 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-06T08:17:54,130 WARN  [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-06T08:17:54,136 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@313083fb{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-06T08:17:54,136 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@292559c2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-06T08:17:54,136 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-06T08:17:54,136 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@677fba52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-06T08:17:54,136 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23eb3448{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/7d836cac-ff72-5f4b-57ff-2519c7cd6e61/hadoop.log.dir/,STOPPED}
2024-12-06T08:17:54,142 INFO  [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-06T08:17:54,157 INFO  [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-12-06T08:17:54,165 INFO  [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=147 (was 125) - Thread LEAK? -, OpenFileDescriptor=516 (was 487) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=124 (was 124), ProcessCount=11 (was 11), AvailableMemoryMB=7784 (was 7800)